/*
 * Copyright (c) 2017-19 David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _FRRCU_H
#define _FRRCU_H

#include "memory.h"
#include "atomlist.h"
#include "seqlock.h"

/* quick RCU primer:
 * There's a global sequence counter. Whenever a thread does a
 * rcu_read_lock(), it is marked as holding the current sequence counter.
 * When something is cleaned up with RCU, the global sequence counter is
 * increased and the item is queued for cleanup - *after* all threads are
 * at a more recent sequence counter (or no sequence counter / unheld).
 *
 * So, by delaying resource cleanup, RCU ensures that things don't go away
 * while another thread may still hold a (stale) reference.
 *
 * Note that even if a thread is in rcu_read_lock(), it is invalid for that
 * thread to access bits after rcu_free() & co. have been called on them.
 * This is a design choice that allows no-op'ing out the entire RCU
 * mechanism if we're running single-threaded. (It also allows some
 * optimization on the counter bumping.)
 *
 * Differences from Linux kernel RCU:
 * - there's no rcu_synchronize(); if you really need to defer something,
 *   use rcu_call() (and double-check it's really necessary)
 * - rcu_dereference() and rcu_assign_pointer() don't exist, use atomic_*
 *   instead (ATOM* list structures do the right thing)
 */
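
/* Illustrative read-side pattern (a sketch only; "struct item",
 * "global_item" and use_item() are hypothetical, not part of this API):
 *
 *	rcu_read_lock();
 *	struct item *it = atomic_load_explicit(&global_item,
 *					       memory_order_acquire);
 *	if (it)
 *		use_item(it);	// safe: a concurrent rcu_free() on "it" is
 *				// delayed until we drop the RCU hold below
 *	rcu_read_unlock();
 *
 * A writer unlinks the item from shared structures and then calls
 * rcu_free() on it; the memory is only returned once all threads have
 * moved past their RCU-held sections.
 */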

/* opaque */
struct rcu_thread;

/* called before new thread creation, sets up rcu thread info for the new
 * thread before it actually starts running. This ensures possible RCU
 * references are held across thread startup.
 *
 * return value must be passed into the new thread's call to rcu_thread_start()
 */
extern struct rcu_thread *rcu_thread_prepare(void);

/* cleanup in case pthread_create() fails */
extern void rcu_thread_unprepare(struct rcu_thread *rcu_thread);

/* called early in the new thread, with the return value from the above.
 * NB: new thread is initially in RCU-held state! (at depth 1)
 *
 * TBD: maybe inherit RCU state from rcu_thread_prepare()?
 */
extern void rcu_thread_start(struct rcu_thread *rcu_thread);
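
/* Illustrative startup sequence (a sketch; my_thread_fn() and how the
 * rcu_thread pointer reaches it are hypothetical - FRR's frr_pthread
 * wrappers normally take care of this plumbing):
 *
 *	struct rcu_thread *rt = rcu_thread_prepare();
 *	if (pthread_create(&pt, NULL, my_thread_fn, arg) != 0)
 *		rcu_thread_unprepare(rt);	// creation failed, clean up
 *
 *	// first thing inside my_thread_fn() (rt handed over via arg):
 *	rcu_thread_start(rt);
 *	// the thread is now RCU-held at depth 1; drop that once it no
 *	// longer needs startup-time references:
 *	rcu_read_unlock();
 */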

/* thread exit is handled through pthread_key_create's destructor function */

/* global RCU shutdown - must be called with only 1 active thread left. waits
 * until remaining RCU actions are done & RCU thread has exited.
 *
 * This is mostly here to get a clean exit without memleaks.
 */
extern void rcu_shutdown(void);

/* enter / exit RCU-held state. counter-based, so can be called nested. */
extern void rcu_read_lock(void);
extern void rcu_read_unlock(void);

/* for debugging / safety checks */
extern void rcu_assert_read_locked(void);
extern void rcu_assert_read_unlocked(void);
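
/* Since the held state is a per-thread counter, nesting works (sketch):
 *
 *	rcu_read_lock();
 *	rcu_read_lock();		// nested; depth is now 2
 *	rcu_assert_read_locked();
 *	rcu_read_unlock();
 *	rcu_read_unlock();		// RCU-held state ends here
 *	rcu_assert_read_unlocked();
 */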

enum rcu_action_type {
	RCUA_INVALID = 0,
	/* used internally by the RCU code, shouldn't ever show up outside */
	RCUA_NEXT,
	RCUA_END,
	/* normal RCU actions, for outside use */
	RCUA_FREE,
	RCUA_CLOSE,
	RCUA_CALL,
};

/* since rcu_head is intended to be embedded into structs which may exist
 * with lots of copies, rcu_head is shrunk down to its absolute minimum -
 * the atomlist pointer + a pointer to this action struct.
 */
struct rcu_action {
	enum rcu_action_type type;

	union {
		struct {
			struct memtype *mt;
			ptrdiff_t offset;
		} free;

		struct {
			void (*fptr)(void *arg);
			ptrdiff_t offset;
		} call;
	} u;
};

/* RCU cleanup function queue item */
PREDECL_ATOMLIST(rcu_heads)
struct rcu_head {
	struct rcu_heads_item head;
	const struct rcu_action *action;
};
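
/* Example of embedding an rcu_head (sketch; "struct item" is hypothetical):
 *
 *	struct item {
 *		int value;
 *		struct rcu_head rcu_head;	// used by rcu_free()/rcu_call()
 *	};
 */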

/* special RCU head for delayed fd-close */
struct rcu_head_close {
	struct rcu_head rcu_head;
	int fd;
};

/* enqueue RCU action - use the macros below to get the rcu_action set up */
extern void rcu_enqueue(struct rcu_head *head, const struct rcu_action *action);

/* RCU free() and file close() operations.
 *
 * freed memory / closed fds become _immediately_ unavailable to the calling
 * thread, but will remain available for other threads until they have passed
 * into RCU-released state.
 */

/* may be called with NULL mt to do non-MTYPE free() */
#define rcu_free(mtype, ptr, field) \
	do { \
		typeof(ptr) _ptr = (ptr); \
		if (!_ptr) \
			break; \
		struct rcu_head *_rcu_head = &_ptr->field; \
		static const struct rcu_action _rcu_action = { \
			.type = RCUA_FREE, \
			.u.free = { \
				.mt = mtype, \
				.offset = offsetof(typeof(*_ptr), field), \
			}, \
		}; \
		rcu_enqueue(_rcu_head, &_rcu_action); \
	} while (0)
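
/* Example (sketch; MTYPE_ITEM and "struct item" with an embedded rcu_head
 * named "rcu_head" are hypothetical):
 *
 *	// unlink "it" from all shared data structures first, then:
 *	rcu_free(MTYPE_ITEM, it, rcu_head);
 *	// "it" must no longer be touched by this thread; other threads that
 *	// still hold a reference may keep using it until they have passed
 *	// into RCU-released state.
 */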

/* use this sparingly, it runs on (and blocks) the RCU thread */
#define rcu_call(func, ptr, field) \
	do { \
		typeof(ptr) _ptr = (ptr); \
		void (*_fptype)(typeof(ptr)); \
		struct rcu_head *_rcu_head = &_ptr->field; \
		static const struct rcu_action _rcu_action = { \
			.type = RCUA_CALL, \
			.u.call = { \
				.fptr = (void *)func, \
				.offset = offsetof(typeof(*_ptr), field), \
			}, \
		}; \
		/* no-op assignment, only type-checks that func takes typeof(ptr) */ \
		(void)(_fptype = func); \
		rcu_enqueue(_rcu_head, &_rcu_action); \
	} while (0)
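
/* Example (sketch; item_cleanup() and "struct item" are hypothetical):
 *
 *	static void item_cleanup(struct item *it)
 *	{
 *		// runs on the RCU thread - keep it short and non-blocking
 *	}
 *
 *	rcu_call(item_cleanup, it, rcu_head);
 */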

extern void rcu_close(struct rcu_head_close *head, int fd);
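
/* Example (sketch; "struct connection" is hypothetical):
 *
 *	struct connection {
 *		int fd;
 *		struct rcu_head_close closer;
 *	};
 *
 *	rcu_close(&conn->closer, conn->fd);
 *	// the fd is invalid for this thread from here on, but is only
 *	// close()d after all threads have left their RCU-held sections.
 */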

#endif /* _FRRCU_H */