/* Source: Open vSwitch (mirror_ovs.git) -- lib/cmap.h. */
/*
 * Copyright (c) 2014, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#ifndef CMAP_H
18#define CMAP_H 1
19
20#include <stdbool.h>
21#include <stdint.h>
22#include "ovs-rcu.h"
23#include "util.h"
24
/* Concurrent hash map
 * ===================
 *
 * A single-writer, multiple-reader hash table that efficiently supports
 * duplicates.
 *
 *
 * Thread-safety
 * =============
 *
 * The general rules are:
 *
 *    - Only a single thread may safely call into cmap_insert(),
 *      cmap_remove(), or cmap_replace() at any given time.
 *
 *    - Any number of threads may use functions and macros that search or
 *      iterate through a given cmap, even in parallel with other threads
 *      calling cmap_insert(), cmap_remove(), or cmap_replace().
 *
 *      There is one exception: cmap_find_protected() is only safe if no
 *      thread is currently calling cmap_insert(), cmap_remove(), or
 *      cmap_replace().  (Use ordinary cmap_find() if that is not
 *      guaranteed.)
 *
 *    - See "Iteration" below for additional thread safety rules.
 *
 * Writers must use special care to ensure that any elements that they remove
 * do not get freed or reused until readers have finished with them.  This
 * includes inserting the element back into its original cmap or a different
 * one.  One correct way to do this is to free them from an RCU callback with
 * ovsrcu_postpone().
 */
56
57/* A concurrent hash map node, to be embedded inside the data structure being
58 * mapped.
59 *
60 * All nodes linked together on a chain have exactly the same hash value. */
61struct cmap_node {
62 OVSRCU_TYPE(struct cmap_node *) next; /* Next node with same hash. */
63};
64
65static inline struct cmap_node *
66cmap_node_next(const struct cmap_node *node)
67{
68 return ovsrcu_get(struct cmap_node *, &node->next);
69}
70
71static inline struct cmap_node *
72cmap_node_next_protected(const struct cmap_node *node)
73{
74 return ovsrcu_get_protected(struct cmap_node *, &node->next);
75}
76
77/* Concurrent hash map. */
78struct cmap {
79 OVSRCU_TYPE(struct cmap_impl *) impl;
80};
81
b70e6976
BP
82/* Initializer for an empty cmap. */
83#define CMAP_INITIALIZER { \
84 .impl = OVSRCU_INITIALIZER((struct cmap_impl *) &empty_cmap) \
85 }
86extern OVS_ALIGNED_VAR(CACHE_LINE_SIZE) const struct cmap_impl empty_cmap;
87
/* Initialization and destruction. */
void cmap_init(struct cmap *);
void cmap_destroy(struct cmap *);

/* Counting. */
size_t cmap_count(const struct cmap *);
bool cmap_is_empty(const struct cmap *);

/* Insertion and deletion.  Each returns the number of nodes in the map once
 * the operation has completed. */
size_t cmap_insert(struct cmap *, struct cmap_node *, uint32_t hash);
static inline size_t cmap_remove(struct cmap *, struct cmap_node *,
                                 uint32_t hash);
size_t cmap_replace(struct cmap *, struct cmap_node *old_node,
                    struct cmap_node *new_node, uint32_t hash);
0e666160
BP
102
/* Search.
 *
 * These macros iterate NODE over all of the nodes in CMAP that have hash
 * value equal to HASH.  MEMBER must be the name of the 'struct cmap_node'
 * member within NODE.
 *
 * CMAP and HASH are evaluated only once.  NODE is evaluated many times.
 *
 *
 * Thread-safety
 * =============
 *
 * CMAP_NODE_FOR_EACH will reliably visit each of the nodes starting with
 * CMAP_NODE, even with concurrent insertions and deletions.  (Of course, if
 * nodes are being inserted or deleted, it might or might not visit the nodes
 * actually being inserted or deleted.)
 *
 * CMAP_NODE_FOR_EACH_PROTECTED may only be used if the containing CMAP is
 * guaranteed not to change during iteration.  It may be only slightly faster.
 *
 * CMAP_FOR_EACH_WITH_HASH will reliably visit each of the nodes with the
 * specified hash in CMAP, even with concurrent insertions and deletions.  (Of
 * course, if nodes with the given HASH are being inserted or deleted, it
 * might or might not visit the nodes actually being inserted or deleted.)
 *
 * CMAP_FOR_EACH_WITH_HASH_PROTECTED may only be used if CMAP is guaranteed
 * not to change during iteration.  It may be very slightly faster.
 */
#define CMAP_NODE_FOR_EACH(NODE, MEMBER, CMAP_NODE)                       \
    for (INIT_CONTAINER(NODE, CMAP_NODE, MEMBER);                         \
         (NODE) != OBJECT_CONTAINING(NULL, NODE, MEMBER);                 \
         ASSIGN_CONTAINER(NODE, cmap_node_next(&(NODE)->MEMBER), MEMBER))

#define CMAP_NODE_FOR_EACH_PROTECTED(NODE, MEMBER, CMAP_NODE)             \
    for (INIT_CONTAINER(NODE, CMAP_NODE, MEMBER);                         \
         (NODE) != OBJECT_CONTAINING(NULL, NODE, MEMBER);                 \
         ASSIGN_CONTAINER(NODE, cmap_node_next_protected(&(NODE)->MEMBER), \
                          MEMBER))

#define CMAP_FOR_EACH_WITH_HASH(NODE, MEMBER, HASH, CMAP)                 \
    CMAP_NODE_FOR_EACH(NODE, MEMBER, cmap_find(CMAP, HASH))

#define CMAP_FOR_EACH_WITH_HASH_PROTECTED(NODE, MEMBER, HASH, CMAP)       \
    CMAP_NODE_FOR_EACH_PROTECTED(NODE, MEMBER, cmap_find_protected(CMAP, HASH))
0e666160 144
/* Point lookups by hash. */
const struct cmap_node *cmap_find(const struct cmap *, uint32_t hash);
struct cmap_node *cmap_find_protected(const struct cmap *, uint32_t hash);

/* Find a node by index, or find an index by hash.  The 'index' of a cmap
 * entry packs the bucket number and the position within that bucket into a
 * single convenient integer, so every entry has a unique index.  The index
 * is not used internally by cmap.
 *
 * These functions assume an index fits in a uint32_t; in OVS, table sizes
 * are usually much smaller than that. */
const struct cmap_node *cmap_find_by_index(const struct cmap *,
                                           uint32_t index);
uint32_t cmap_find_index(const struct cmap *, uint32_t hash);
158
/* Batched lookup.  For every 1-bit in 'map', looks up the corresponding
 * value in 'hashes' and, when that hash is present in 'cmap', stores the
 * found node in the matching slot of 'nodes'.  Slots for missed lookups are
 * left untouched, i.e., no NULL pointers are stored there.
 *
 * Returns a bitmap with a 1 for each 'nodes' slot that was written, 0
 * otherwise.  Generally, the caller follows up with CMAP_NODE_FOR_EACH to
 * check for hash collisions. */
unsigned long cmap_find_batch(const struct cmap *cmap, unsigned long map,
                              uint32_t hashes[],
                              const struct cmap_node *nodes[]);
170
/* Iteration.
 *
 *
 * Thread-safety
 * =============
 *
 * Iteration is safe even in a cmap that is changing concurrently.  However:
 *
 *     - In the presence of concurrent calls to cmap_insert(), any given
 *       iteration might skip some nodes and might visit some nodes more than
 *       once.  If this is a problem, then the iterating code should lock the
 *       data structure (a rwlock can be used to allow multiple threads to
 *       iterate in parallel).
 *
 *     - Concurrent calls to cmap_remove() don't have the same problem.  (A
 *       node being deleted may be visited once or not at all.  Other nodes
 *       will be visited once.)
 *
 *     - If the cmap is changing, it is not safe to quiesce while iterating.
 *       Even if the changes are done by the same thread that's performing
 *       the iteration (Corollary: it is not safe to call cmap_remove() and
 *       quiesce in the loop body).
 *
 *
 * Example
 * =======
 *
 *     struct my_node {
 *         struct cmap_node cmap_node;
 *         int extra_data;
 *     };
 *
 *     struct cmap my_map;
 *     struct my_node *my_node;
 *
 *     cmap_init(&my_map);
 *     ...add data...
 *     CMAP_FOR_EACH (my_node, cmap_node, &my_map) {
 *         ...operate on my_node...
 *     }
 *
 * CMAP_FOR_EACH is "safe" in the sense of HMAP_FOR_EACH_SAFE.  That is, it
 * is safe to free the current node before going on to the next iteration.
 * Most of the time, though, this doesn't matter for a cmap because node
 * deallocation has to be postponed until the next grace period.  This means
 * that this guarantee is useful only in deallocation code already executing
 * at postponed time, when it is known that the RCU grace period has already
 * expired.
 */
/* Advances CURSOR and binds NODE to the element it pointed at; evaluates to
 * false once the cursor is exhausted. */
#define CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER)    \
    ((CURSOR)->node                                     \
     ? (INIT_CONTAINER(NODE, (CURSOR)->node, MEMBER),   \
        cmap_cursor_advance(CURSOR),                    \
        true)                                           \
     : false)

#define CMAP_CURSOR_FOR_EACH(NODE, MEMBER, CURSOR, CMAP)    \
    for (*(CURSOR) = cmap_cursor_start(CMAP);               \
         CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER);      \
        )

#define CMAP_CURSOR_FOR_EACH_CONTINUE(NODE, MEMBER, CURSOR) \
    while (CMAP_CURSOR_FOR_EACH__(NODE, CURSOR, MEMBER))
9d933fc7 235
/* Iteration state for CMAP_FOR_EACH and friends: the current bucket and
 * entry within the implementation, plus the next node to hand out. */
struct cmap_cursor {
    const struct cmap_impl *impl;
    uint32_t bucket_idx;
    int entry_idx;
    struct cmap_node *node;
};

struct cmap_cursor cmap_cursor_start(const struct cmap *);
void cmap_cursor_advance(struct cmap_cursor *);
a532e683 245
/* Generate a unique name for the cursor with the __COUNTER__ macro to
 * allow nesting of CMAP_FOR_EACH loops. */
#define CURSOR_JOIN2(x, y) x##y
#define CURSOR_JOIN(x, y) CURSOR_JOIN2(x, y)

#define CMAP_FOR_EACH__(NODE, MEMBER, CMAP, CURSOR_NAME)           \
    for (struct cmap_cursor CURSOR_NAME = cmap_cursor_start(CMAP); \
         CMAP_CURSOR_FOR_EACH__(NODE, &CURSOR_NAME, MEMBER);       \
        )

#define CMAP_FOR_EACH(NODE, MEMBER, CMAP)   \
    CMAP_FOR_EACH__(NODE, MEMBER, CMAP,     \
                    CURSOR_JOIN(cursor_, __COUNTER__))
259
static inline struct cmap_node *cmap_first(const struct cmap *);

/* Another, less preferred, form of iteration, for use in situations where it
 * is difficult to maintain a pointer to a cmap_node. */
struct cmap_position {
    unsigned int bucket;
    unsigned int entry;
    unsigned int offset;
};

struct cmap_node *cmap_next_position(const struct cmap *,
                                     struct cmap_position *);
272
9d933fc7
JR
273/* Returns the first node in 'cmap', in arbitrary order, or a null pointer if
274 * 'cmap' is empty. */
275static inline struct cmap_node *
276cmap_first(const struct cmap *cmap)
277{
278 struct cmap_position pos = { 0, 0, 0 };
279
280 return cmap_next_position(cmap, &pos);
281}
282
/* Removes 'node' from 'cmap'.  The caller must ensure that 'cmap' cannot
 * change concurrently (from another thread).
 *
 * 'node' must not be destroyed or modified or inserted back into 'cmap' or
 * into any other concurrent hash map while any other thread might be
 * accessing it.  One correct way to do this is to free it from an RCU
 * callback with ovsrcu_postpone().
 *
 * Returns the current number of nodes in the cmap after the removal. */
static inline size_t
cmap_remove(struct cmap *cmap, struct cmap_node *node, uint32_t hash)
{
    /* Removal is implemented as replacement with nothing. */
    size_t remaining = cmap_replace(cmap, node, NULL, hash);

    return remaining;
}
297
0e666160 298#endif /* cmap.h */