/* lib/cmap.h -- concurrent hash map (Open vSwitch). */
/*
 * Copyright (c) 2014, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Concurrent hash map
 * ===================
 *
 * A single-writer, multiple-reader hash table that efficiently supports
 * duplicates.
 *
 * The general rules are:
 *
 *    - Only a single thread may safely call into cmap_insert(),
 *      cmap_remove(), or cmap_replace() at any given time.
 *
 *    - Any number of threads may use functions and macros that search or
 *      iterate through a given cmap, even in parallel with other threads
 *      calling cmap_insert(), cmap_remove(), or cmap_replace().
 *
 *      There is one exception: cmap_find_protected() is only safe if no thread
 *      is currently calling cmap_insert(), cmap_remove(), or cmap_replace().
 *      (Use ordinary cmap_find() if that is not guaranteed.)
 *
 *    - See "Iteration" below for additional thread safety rules.
 *
 * Writers must use special care to ensure that any elements that they remove
 * do not get freed or reused until readers have finished with them.  This
 * includes inserting the element back into its original cmap or a different
 * one.  One correct way to do this is to free them from an RCU callback with
 * ovsrcu_postpone(). */
57 /* A concurrent hash map node, to be embedded inside the data structure being
60 * All nodes linked together on a chain have exactly the same hash value. */
62 OVSRCU_TYPE(struct cmap_node
*) next
; /* Next node with same hash. */
65 static inline struct cmap_node
*
66 cmap_node_next(const struct cmap_node
*node
)
68 return ovsrcu_get(struct cmap_node
*, &node
->next
);
71 static inline struct cmap_node
*
72 cmap_node_next_protected(const struct cmap_node
*node
)
74 return ovsrcu_get_protected(struct cmap_node
*, &node
->next
);
77 /* Concurrent hash map. */
79 OVSRCU_TYPE(struct cmap_impl
*) impl
;
82 /* Initializer for an empty cmap. */
83 #define CMAP_INITIALIZER { \
84 .impl = OVSRCU_INITIALIZER((struct cmap_impl *) &empty_cmap) \
86 extern OVS_ALIGNED_VAR(CACHE_LINE_SIZE
) const struct cmap_impl empty_cmap
;
/* Initialization and destruction. */
void cmap_init(struct cmap *);
void cmap_destroy(struct cmap *);

/* Counting. */
size_t cmap_count(const struct cmap *);
bool cmap_is_empty(const struct cmap *);
/* Insertion and deletion.  Return the current count after the operation. */
size_t cmap_insert(struct cmap *, struct cmap_node *, uint32_t hash);
static inline size_t cmap_remove(struct cmap *, struct cmap_node *,
                                 uint32_t hash);
size_t cmap_replace(struct cmap *, struct cmap_node *old_node,
                    struct cmap_node *new_node, uint32_t hash);
/* Searching in a cmap.
 *
 * These macros iterate NODE over all of the nodes in CMAP that have hash value
 * equal to HASH.  MEMBER must be the name of the 'struct cmap_node' member
 * within NODE.
 *
 * CMAP and HASH are evaluated only once.  NODE is evaluated many times.
 *
 * CMAP_NODE_FOR_EACH will reliably visit each of the nodes starting with
 * CMAP_NODE, even with concurrent insertions and deletions.  (Of
 * course, if nodes are being inserted or deleted, it might or might not visit
 * the nodes actually being inserted or deleted.)
 *
 * CMAP_NODE_FOR_EACH_PROTECTED may only be used if the containing CMAP is
 * guaranteed not to change during iteration.  It may be only slightly faster.
 *
 * CMAP_FOR_EACH_WITH_HASH will reliably visit each of the nodes with the
 * specified hash in CMAP, even with concurrent insertions and deletions.  (Of
 * course, if nodes with the given HASH are being inserted or deleted, it might
 * or might not visit the nodes actually being inserted or deleted.)
 *
 * CMAP_FOR_EACH_WITH_HASH_PROTECTED may only be used if CMAP is guaranteed not
 * to change during iteration.  It may be very slightly faster. */
#define CMAP_NODE_FOR_EACH(NODE, MEMBER, CMAP_NODE)                     \
    for (INIT_CONTAINER(NODE, CMAP_NODE, MEMBER);                       \
         (NODE) != OBJECT_CONTAINING(NULL, NODE, MEMBER);               \
         ASSIGN_CONTAINER(NODE, cmap_node_next(&(NODE)->MEMBER), MEMBER))
#define CMAP_NODE_FOR_EACH_PROTECTED(NODE, MEMBER, CMAP_NODE)           \
    for (INIT_CONTAINER(NODE, CMAP_NODE, MEMBER);                       \
         (NODE) != OBJECT_CONTAINING(NULL, NODE, MEMBER);               \
         ASSIGN_CONTAINER(NODE, cmap_node_next_protected(&(NODE)->MEMBER), \
                          MEMBER))
#define CMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, CMAP)   \
    CMAP_NODE_FOR_EACH(NODE, MEMBER, cmap_find(CMAP, HASH))
/* Fixed: this previously expanded to cmap_find_locked(), which is not
 * declared anywhere in this header; the protected-context lookup declared
 * below is cmap_find_protected(). */
#define CMAP_FOR_EACH_WITH_HASH_PROTECTED(NODE, MEMBER, HASH, CMAP)     \
    CMAP_NODE_FOR_EACH_PROTECTED(NODE, MEMBER,                          \
                                 cmap_find_protected(CMAP, HASH))
/* Single-key lookup.  cmap_find() is safe for concurrent readers;
 * cmap_find_protected() requires that no writer be active (see the comment
 * at the top of this file). */
const struct cmap_node *cmap_find(const struct cmap *, uint32_t hash);
struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);
/* Looks up multiple 'hashes', when the corresponding bit in 'map' is 1,
 * and sets the corresponding pointer in 'nodes', if the hash value was
 * found from the 'cmap'.  In other cases the 'nodes' values are not changed,
 * i.e., no NULL pointers are stored there.
 * Returns a map where a bit is set to 1 if the corresponding 'nodes' pointer
 * was stored, 0 otherwise.
 * Generally, the caller wants to use CMAP_NODE_FOR_EACH to verify for
 * hash collisions. */
unsigned long cmap_find_batch(const struct cmap *cmap, unsigned long map,
                              uint32_t hashes[],
                              const struct cmap_node *nodes[]);
/* Iteration.
 *
 * Iteration is safe even in a cmap that is changing concurrently.  However:
 *
 *     - In the presence of concurrent calls to cmap_insert(), any given
 *       iteration might skip some nodes and might visit some nodes more than
 *       once.  If this is a problem, then the iterating code should lock the
 *       data structure (a rwlock can be used to allow multiple threads to
 *       iterate in parallel).
 *
 *     - Concurrent calls to cmap_remove() don't have the same problem.  (A
 *       node being deleted may be visited once or not at all.  Other nodes
 *       will be visited once.)
 *
 *     - If the cmap is changing, it is not safe to quiesce while iterating.
 *       Even if the changes are done by the same thread that's performing the
 *       iteration (Corollary: it is not safe to call cmap_remove() and quiesce
 *       in the loop body.)
 *
 * Example usage, for a node type declared along the lines of:
 *
 *     struct my_node {
 *         struct cmap_node cmap_node;
 *         ...
 *     };
 *
 *     struct cmap_cursor cursor;
 *     struct my_node *iter;
 *     struct cmap my_map;
 *
 *     CMAP_FOR_EACH (my_node, cmap_node, &cursor, &cmap) {
 *         ...operate on my_node...
 *     }
 *
 * CMAP_FOR_EACH is "safe" in the sense of HMAP_FOR_EACH_SAFE.  That is, it is
 * safe to free the current node before going on to the next iteration.  Most
 * of the time, though, this doesn't matter for a cmap because node
 * deallocation has to be postponed until the next grace period.  This means
 * that this guarantee is useful only in deallocation code already executing at
 * postponed time, when it is known that the RCU grace period has already
 * expired. */
/* Core cursor step: if the cursor holds a node, bind NODE to its container,
 * advance the cursor, and yield true; otherwise yield false.
 * NOTE(review): the condition and true/false arms were lost in extraction and
 * are reconstructed from the visible INIT_CONTAINER/advance sequence --
 * confirm against upstream. */
#define CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER)    \
    ((CURSOR)->node                                     \
     ? (INIT_CONTAINER(NODE, (CURSOR)->node, MEMBER),   \
        cmap_cursor_advance(CURSOR),                    \
        true)                                           \
     : false)

#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
         CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER);      \
        )

#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
    while (CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER))
/* Iteration cursor.  Only 'impl' and 'node' are visible in this extraction;
 * NOTE(review): upstream declares additional position members between them
 * (bucket/entry indexes) that were elided here -- confirm before use. */
struct cmap_cursor {
    const struct cmap_impl *impl;   /* Snapshot of the map implementation. */
    uint32_t bucket_idx;            /* NOTE(review): reconstructed member. */
    int entry_idx;                  /* NOTE(review): reconstructed member. */
    struct cmap_node *node;         /* Next node to visit, or NULL at end. */
};

struct cmap_cursor cmap_cursor_start(const struct cmap *);
void cmap_cursor_advance(struct cmap_cursor *);
/* Preferred iteration form: declares its own cursor in the loop scope
 * (requires C99 or later, which this file already assumes). */
#define CMAP_FOR_EACH(NODE, MEMBER, CMAP)                       \
    for (struct cmap_cursor cursor__ = cmap_cursor_start(CMAP); \
         CMAP_CURSOR_FOR_EACH__(NODE, &cursor__, MEMBER);       \
        )

static inline struct cmap_node *cmap_first(const struct cmap *);
/* Another, less preferred, form of iteration, for use in situations where it
 * is difficult to maintain a pointer to a cmap_node.
 *
 * NOTE(review): the three members were elided in extraction; their count is
 * established by the '{ 0, 0, 0 }' initializer in cmap_first() below, but the
 * names are reconstructed -- confirm against upstream. */
struct cmap_position {
    unsigned int bucket;
    unsigned int entry;
    unsigned int offset;
};

struct cmap_node *cmap_next_position(const struct cmap *,
                                     struct cmap_position *);
254 /* Returns the first node in 'cmap', in arbitrary order, or a null pointer if
255 * 'cmap' is empty. */
256 static inline struct cmap_node
*
257 cmap_first(const struct cmap
*cmap
)
259 struct cmap_position pos
= { 0, 0, 0 };
261 return cmap_next_position(cmap
, &pos
);
/* Removes 'node' from 'cmap'.  The caller must ensure that 'cmap' cannot
 * change concurrently (from another thread).
 *
 * 'node' must not be destroyed or modified or inserted back into 'cmap' or
 * into any other concurrent hash map while any other thread might be accessing
 * it.  One correct way to do this is to free it from an RCU callback with
 * ovsrcu_postpone().
 *
 * Returns the current number of nodes in the cmap after the removal. */
static inline size_t
cmap_remove(struct cmap *cmap, struct cmap_node *node, uint32_t hash)
{
    /* Removal is replacement with NULL. */
    return cmap_replace(cmap, node, NULL, hash);
}