]>
Commit | Line | Data |
---|---|---|
3e41733f DL |
/*
 * Copyright (c) 2017-19  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16 | ||
17 | #ifndef _FRRCU_H | |
18 | #define _FRRCU_H | |
19 | ||
20 | #include "memory.h" | |
21 | #include "atomlist.h" | |
3e41733f | 22 | |
17e38209 RW |
23 | #ifdef __cplusplus |
24 | extern "C" { | |
25 | #endif | |
26 | ||
3e41733f DL |
/* quick RCU primer:
 *   There's a global sequence counter.  Whenever a thread does a
 *   rcu_read_lock(), it is marked as holding the current sequence counter.
 *   When something is cleaned with RCU, the global sequence counter is
 *   increased and the item is queued for cleanup - *after* all threads are
 *   at a more recent sequence counter (or no sequence counter / unheld).
 *
 *   So, by delaying resource cleanup, RCU ensures that things don't go away
 *   while another thread may hold a (stale) reference.
 *
 *   Note that even if a thread is in rcu_read_lock(), it is invalid for that
 *   thread to access bits after rcu_free() & co. have been called on them.
 *   This is a design choice to allow no-op'ing out the entire RCU mechanism
 *   if we're running singlethreaded.  (Also allows some optimization on the
 *   counter bumping.)
 *
 * differences from Linux Kernel RCU:
 *   - there's no rcu_synchronize(); if you really need to defer something,
 *     use rcu_call() (and double-check it's really necessary)
 *   - rcu_dereference() and rcu_assign_pointer() don't exist; use atomic_*
 *     instead (ATOM* list structures do the right thing)
 */
48 | ||
49 | /* opaque */ | |
50 | struct rcu_thread; | |
51 | ||
52 | /* called before new thread creation, sets up rcu thread info for new thread | |
53 | * before it actually exits. This ensures possible RCU references are held | |
54 | * for thread startup. | |
55 | * | |
56 | * return value must be passed into the new thread's call to rcu_thread_start() | |
57 | */ | |
58 | extern struct rcu_thread *rcu_thread_prepare(void); | |
59 | ||
60 | /* cleanup in case pthread_create() fails */ | |
61 | extern void rcu_thread_unprepare(struct rcu_thread *rcu_thread); | |
62 | ||
63 | /* called early in the new thread, with the return value from the above. | |
64 | * NB: new thread is initially in RCU-held state! (at depth 1) | |
65 | * | |
66 | * TBD: maybe inherit RCU state from rcu_thread_prepare()? | |
67 | */ | |
68 | extern void rcu_thread_start(struct rcu_thread *rcu_thread); | |
69 | ||
70 | /* thread exit is handled through pthread_key_create's destructor function */ | |
71 | ||
72 | /* global RCU shutdown - must be called with only 1 active thread left. waits | |
73 | * until remaining RCU actions are done & RCU thread has exited. | |
74 | * | |
75 | * This is mostly here to get a clean exit without memleaks. | |
76 | */ | |
77 | extern void rcu_shutdown(void); | |
78 | ||
79 | /* enter / exit RCU-held state. counter-based, so can be called nested. */ | |
80 | extern void rcu_read_lock(void); | |
81 | extern void rcu_read_unlock(void); | |
82 | ||
83 | /* for debugging / safety checks */ | |
84 | extern void rcu_assert_read_locked(void); | |
85 | extern void rcu_assert_read_unlocked(void); | |
86 | ||
87 | enum rcu_action_type { | |
88 | RCUA_INVALID = 0, | |
89 | /* used internally by the RCU code, shouldn't ever show up outside */ | |
90 | RCUA_NEXT, | |
91 | RCUA_END, | |
92 | /* normal RCU actions, for outside use */ | |
93 | RCUA_FREE, | |
94 | RCUA_CLOSE, | |
95 | RCUA_CALL, | |
96 | }; | |
97 | ||
98 | /* since rcu_head is intended to be embedded into structs which may exist | |
99 | * with lots of copies, rcu_head is shrunk down to its absolute minimum - | |
100 | * the atomlist pointer + a pointer to this action struct. | |
101 | */ | |
102 | struct rcu_action { | |
103 | enum rcu_action_type type; | |
104 | ||
105 | union { | |
106 | struct { | |
107 | struct memtype *mt; | |
108 | ptrdiff_t offset; | |
109 | } free; | |
110 | ||
111 | struct { | |
112 | void (*fptr)(void *arg); | |
113 | ptrdiff_t offset; | |
114 | } call; | |
115 | } u; | |
116 | }; | |
117 | ||
118 | /* RCU cleanup function queue item */ | |
960b9a53 | 119 | PREDECL_ATOMLIST(rcu_heads); |
3e41733f DL |
120 | struct rcu_head { |
121 | struct rcu_heads_item head; | |
122 | const struct rcu_action *action; | |
123 | }; | |
124 | ||
125 | /* special RCU head for delayed fd-close */ | |
126 | struct rcu_head_close { | |
127 | struct rcu_head rcu_head; | |
128 | int fd; | |
129 | }; | |
130 | ||
131 | /* enqueue RCU action - use the macros below to get the rcu_action set up */ | |
132 | extern void rcu_enqueue(struct rcu_head *head, const struct rcu_action *action); | |
133 | ||
134 | /* RCU free() and file close() operations. | |
135 | * | |
136 | * freed memory / closed fds become _immediately_ unavailable to the calling | |
137 | * thread, but will remain available for other threads until they have passed | |
138 | * into RCU-released state. | |
139 | */ | |
140 | ||
141 | /* may be called with NULL mt to do non-MTYPE free() */ | |
142 | #define rcu_free(mtype, ptr, field) \ | |
143 | do { \ | |
144 | typeof(ptr) _ptr = (ptr); \ | |
9fe602a1 DL |
145 | if (!_ptr) \ |
146 | break; \ | |
3e41733f DL |
147 | struct rcu_head *_rcu_head = &_ptr->field; \ |
148 | static const struct rcu_action _rcu_action = { \ | |
149 | .type = RCUA_FREE, \ | |
150 | .u.free = { \ | |
151 | .mt = mtype, \ | |
152 | .offset = offsetof(typeof(*_ptr), field), \ | |
153 | }, \ | |
154 | }; \ | |
155 | rcu_enqueue(_rcu_head, &_rcu_action); \ | |
156 | } while (0) | |
157 | ||
158 | /* use this sparingly, it runs on (and blocks) the RCU thread */ | |
159 | #define rcu_call(func, ptr, field) \ | |
160 | do { \ | |
161 | typeof(ptr) _ptr = (ptr); \ | |
162 | void (*fptype)(typeof(ptr)); \ | |
163 | struct rcu_head *_rcu_head = &_ptr->field; \ | |
164 | static const struct rcu_action _rcu_action = { \ | |
165 | .type = RCUA_CALL, \ | |
166 | .u.call = { \ | |
167 | .fptr = (void *)func, \ | |
168 | .offset = offsetof(typeof(*_ptr), field), \ | |
169 | }, \ | |
170 | }; \ | |
171 | (void)(_fptype = func); \ | |
172 | rcu_enqueue(_rcu_head, &_rcu_action); \ | |
173 | } while (0) | |
174 | ||
175 | extern void rcu_close(struct rcu_head_close *head, int fd); | |
176 | ||
17e38209 RW |
177 | #ifdef __cplusplus |
178 | } | |
179 | #endif | |
180 | ||
3e41733f | 181 | #endif /* _FRRCU_H */ |