]> git.proxmox.com Git - mirror_ovs.git/blob - lib/cmap.h
038db6c04d5a88fd120904d33f01d4c74d4f46f1
[mirror_ovs.git] / lib / cmap.h
1 /*
2 * Copyright (c) 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef CMAP_H
18 #define CMAP_H 1
19
20 #include <stdbool.h>
21 #include <stdint.h>
22 #include "ovs-rcu.h"
23 #include "util.h"
24
25 /* Concurrent hash map
26 * ===================
27 *
28 * A single-writer, multiple-reader hash table that efficiently supports
29 * duplicates.
30 *
31 *
32 * Thread-safety
33 * =============
34 *
35 * The general rules are:
36 *
37 * - Only a single thread may safely call into cmap_insert(),
38 * cmap_remove(), or cmap_replace() at any given time.
39 *
40 * - Any number of threads may use functions and macros that search or
41 * iterate through a given cmap, even in parallel with other threads
42 * calling cmap_insert(), cmap_remove(), or cmap_replace().
43 *
44 * There is one exception: cmap_find_protected() is only safe if no thread
45 * is currently calling cmap_insert(), cmap_remove(), or cmap_replace().
46 * (Use ordinary cmap_find() if that is not guaranteed.)
47 *
48 * - See "Iteration" below for additional thread safety rules.
49 *
50 * Writers must use special care to ensure that any elements that they remove
51 * do not get freed or reused until readers have finished with them. This
52 * includes inserting the element back into its original cmap or a different
53 * one. One correct way to do this is to free them from an RCU callback with
54 * ovsrcu_postpone().
55 */
56
/* A concurrent hash map node, to be embedded inside the data structure being
 * mapped.
 *
 * All nodes linked together on a chain have exactly the same hash value. */
struct cmap_node {
    /* Next node in the same-hash chain, or NULL at the end of the chain.
     * RCU-protected: dereference only via cmap_node_next() (readers) or
     * cmap_node_next_protected() (writer / quiesced map). */
    OVSRCU_TYPE(struct cmap_node *) next; /* Next node with same hash. */
};
64
65 static inline struct cmap_node *
66 cmap_node_next(const struct cmap_node *node)
67 {
68 return ovsrcu_get(struct cmap_node *, &node->next);
69 }
70
71 static inline struct cmap_node *
72 cmap_node_next_protected(const struct cmap_node *node)
73 {
74 return ovsrcu_get_protected(struct cmap_node *, &node->next);
75 }
76
/* Concurrent hash map. */
struct cmap {
    /* RCU pointer to the opaque implementation (not defined in this header).
     * NOTE(review): presumably replaced wholesale when the table resizes --
     * confirm against the implementation in lib/cmap.c. */
    OVSRCU_TYPE(struct cmap_impl *) impl;
};
81
/* Initialization. */
void cmap_init(struct cmap *);
void cmap_destroy(struct cmap *); /* Nodes are embedded in the caller's own
                                   * objects and are not freed here. */

/* Count. */
size_t cmap_count(const struct cmap *);
bool cmap_is_empty(const struct cmap *);

/* Insertion and deletion.
 *
 * Writer functions: per the thread-safety rules at the top of this file, at
 * most one thread may call any of these at a given time.  'hash' is the
 * node's hash value (for cmap_replace(), presumably the common hash of
 * 'old_node' and 'new_node' -- confirm against lib/cmap.c). */
void cmap_insert(struct cmap *, struct cmap_node *, uint32_t hash);
void cmap_remove(struct cmap *, struct cmap_node *, uint32_t hash);
void cmap_replace(struct cmap *, struct cmap_node *old_node,
                  struct cmap_node *new_node, uint32_t hash);
95
96 /* Search.
97 *
98 * These macros iterate NODE over all of the nodes in CMAP that have hash value
99 * equal to HASH. MEMBER must be the name of the 'struct cmap_node' member
100 * within NODE.
101 *
102 * CMAP and HASH are evaluated only once. NODE is evaluated many times.
103 *
104 *
105 * Thread-safety
106 * =============
107 *
108 * CMAP_FOR_EACH_WITH_HASH will reliably visit each of the nodes with the
109 * specified hash in CMAP, even with concurrent insertions and deletions. (Of
110 * course, if nodes with the given HASH are being inserted or deleted, it might
111 * or might not visit the nodes actually being inserted or deleted.)
112 *
113 * CMAP_FOR_EACH_WITH_HASH_PROTECTED may only be used if CMAP is guaranteed not
114 * to change during iteration. It may be very slightly faster.
115 */
116 #define CMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, CMAP) \
117 for (ASSIGN_CONTAINER(NODE, cmap_find(CMAP, HASH), MEMBER); \
118 (NODE) != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
119 ASSIGN_CONTAINER(NODE, cmap_node_next(&(NODE)->MEMBER), MEMBER))
120 #define CMAP_FOR_EACH_WITH_HASH_PROTECTED(NODE, MEMBER, HASH, CMAP) \
121 for (ASSIGN_CONTAINER(NODE, cmap_find_locked(CMAP, HASH), MEMBER); \
122 (NODE) != OBJECT_CONTAINING(NULL, NODE, MEMBER); \
123 ASSIGN_CONTAINER(NODE, cmap_node_next_protected(&(NODE)->MEMBER), \
124 MEMBER))
125
126 struct cmap_node *cmap_find(const struct cmap *, uint32_t hash);
127 struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
128
129 /* Iteration.
130 *
131 *
132 * Thread-safety
133 * =============
134 *
135 * Iteration is safe even in a cmap that is changing concurrently. However:
136 *
137 * - In the presence of concurrent calls to cmap_insert(), any given
138 * iteration might skip some nodes and might visit some nodes more than
139 * once. If this is a problem, then the iterating code should lock the
140 * data structure (a rwlock can be used to allow multiple threads to
141 * iterate in parallel).
142 *
143 * - Concurrent calls to cmap_remove() don't have the same problem. (A
144 * node being deleted may be visited once or not at all. Other nodes
145 * will be visited once.)
146 *
147 *
148 * Example
149 * =======
150 *
 *     struct my_node {
 *         struct cmap_node cmap_node;
 *         int extra_data;
 *     };
 *
 *     struct my_node *iter;
 *     struct cmap my_map;
 *
 *     cmap_init(&my_map);
 *     ...add data...
 *     CMAP_FOR_EACH (iter, cmap_node, &my_map) {
 *         ...operate on iter...
 *     }
165 *
166 * CMAP_FOR_EACH is "safe" in the sense of HMAP_FOR_EACH_SAFE. That is, it is
167 * safe to free the current node before going on to the next iteration. Most
168 * of the time, though, this doesn't matter for a cmap because node
169 * deallocation has to be postponed until the next grace period. This means
170 * that this guarantee is useful only in deallocation code already executing at
171 * postponed time, when it is known that the RCU grace period has already
172 * expired.
173 */
174
/* Internal helper for the iteration macros: if CURSOR has a current node,
 * extracts it into NODE (the object containing the 'struct cmap_node' member
 * named MEMBER), advances CURSOR past it, and evaluates to true; otherwise
 * evaluates to false.  Not for direct use. */
#define CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER)    \
    ((CURSOR)->node                                     \
     ? (ASSIGN_CONTAINER(NODE, (CURSOR)->node, MEMBER), \
        cmap_cursor_advance(CURSOR),                    \
        true)                                           \
     : false)

/* Iterates NODE over every node in CMAP, keeping the iteration state in
 * caller-provided CURSOR (a struct cmap_cursor *). */
#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
         CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER);      \
        )

/* Resumes iteration from wherever CURSOR currently points, without
 * restarting it. */
#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
    while (CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER))
189
/* Cursor state for iterating a cmap.  Treat as opaque: obtain one from
 * cmap_cursor_start() and step it with cmap_cursor_advance(), normally via
 * the CMAP_CURSOR_FOR_EACH* macros above. */
struct cmap_cursor {
    const struct cmap_impl *impl; /* Implementation being walked.
                                   * NOTE(review): bucket_idx/entry_idx
                                   * semantics inferred from names -- confirm
                                   * in lib/cmap.c. */
    uint32_t bucket_idx;
    int entry_idx;
    struct cmap_node *node;       /* Next node to visit, or NULL when the
                                   * iteration is complete. */
};

/* Returns a cursor positioned at the start of 'cmap'. */
struct cmap_cursor cmap_cursor_start(const struct cmap *);
void cmap_cursor_advance(struct cmap_cursor *);
199
/* Iterates NODE over every node in CMAP, keeping the cursor in a variable
 * local to the loop (uses a C99 for-loop declaration).  See "Iteration"
 * above for thread-safety rules and an example. */
#define CMAP_FOR_EACH(NODE, MEMBER, CMAP)                       \
    for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
         CMAP_CURSOR_FOR_EACH__(NODE, &cursor__, MEMBER);       \
        )

static inline struct cmap_node *cmap_first(const struct cmap *);
206
/* Another, less preferred, form of iteration, for use in situations where it
 * is difficult to maintain a pointer to a cmap_node.
 *
 * A zero-initialized cmap_position denotes the start of the map (see
 * cmap_first() below). */
struct cmap_position {
    unsigned int bucket; /* NOTE(review): field meanings inferred from names
                          * only -- confirm against lib/cmap.c. */
    unsigned int entry;
    unsigned int offset;
};

/* Presumably returns the node at '*pos' and advances '*pos'; with a zeroed
 * position it yields the first node, or NULL if the map is empty (grounded
 * by cmap_first() below). */
struct cmap_node *cmap_next_position(const struct cmap *,
                                     struct cmap_position *);
217
218 /* Returns the first node in 'cmap', in arbitrary order, or a null pointer if
219 * 'cmap' is empty. */
220 static inline struct cmap_node *
221 cmap_first(const struct cmap *cmap)
222 {
223 struct cmap_position pos = { 0, 0, 0 };
224
225 return cmap_next_position(cmap, &pos);
226 }
227
228 #endif /* cmap.h */