lib/classifier-private.h
/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CLASSIFIER_PRIVATE_H
#define CLASSIFIER_PRIVATE_H 1

#include "cmap.h"
#include "flow.h"
#include "hash.h"
#include "rculist.h"
#include "tag.h"

/* Classifier internal definitions, subject to change at any time. */

/* A set of rules that all have the same fields wildcarded. */
struct cls_subtable {
    struct cmap_node cmap_node; /* Within classifier's 'subtables_map'. */

    /* These fields are only used by writers. */
    int max_priority;           /* Max priority of any rule in subtable. */
    unsigned int max_count;     /* Count of max_priority rules. */

    /* Accessed by iterators. */
    struct rculist rules_list;  /* Unordered. */

    /* Identical rules with lower priority are not inserted into any of the
     * following data structures. */

    /* These fields are accessed by readers who care about wildcarding. */
    const tag_type tag;        /* Tag generated from mask for partitioning. */
    const uint8_t n_indices;                   /* How many indices to use. */
    const uint8_t index_ofs[CLS_MAX_INDICES];  /* u32 segment boundaries. */
    unsigned int trie_plen[CLS_MAX_TRIES];     /* Trie prefix length in 'mask'
                                                * (runtime configurable). */
    const int ports_mask_len;
    struct cmap indices[CLS_MAX_INDICES];      /* Staged lookup indices. */
    rcu_trie_ptr ports_trie;                   /* NULL if none. */

    /* These fields are accessed by all readers. */
    struct cmap rules;                         /* Contains 'cls_match'es. */
    const struct minimask mask;                /* Wildcards for fields. */
    /* 'mask' must be the last field. */
};
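
/* Roughly how the fields above fit together (an illustrative summary added
 * for orientation; the definitions themselves are authoritative):
 * 'index_ofs' splits the u32 array of a flow into consecutive segments, and
 * each cmap in 'indices' maps a hash of the masked flow bits up to the
 * corresponding boundary to candidate rules.  A lookup can consult these
 * staged indices in order and skip the rest of the subtable, including the
 * full 'rules' cmap, as soon as one stage has no match. */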

/* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
 * field) with tags for the "cls_subtable"s that contain rules that match that
 * metadata value. */
struct cls_partition {
    struct cmap_node cmap_node; /* In struct classifier's 'partitions' map. */
    ovs_be64 metadata;          /* metadata value for this partition. */
    tag_type tags;              /* OR of each flow's cls_subtable tag. */
    struct tag_tracker tracker; /* Tracks the bits in 'tags'. */
};
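
/* Illustrative note on how partitions are meant to be used (a summary, not
 * part of the struct definition): a lookup whose metadata value has a
 * partition can skip any subtable whose 'tag' does not intersect the
 * partition's 'tags', since no rule with that metadata can live there. */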

/* Internal representation of a rule in a "struct cls_subtable". */
struct cls_match {
    /* Accessed by everybody. */
    struct rculist list; /* Identical, lower-priority rules. */

    /* Accessed only by writers. */
    struct cls_partition *partition;

    /* Accessed by readers interested in wildcarding. */
    const int priority;          /* Larger numbers are higher priorities. */
    struct cmap_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's
                                                    * 'indices'. */
    /* Accessed by all readers. */
    struct cmap_node cmap_node;  /* Within struct cls_subtable 'rules'. */
    const struct cls_rule *cls_rule;
    const struct miniflow flow;  /* Matching rule. Mask is in the subtable. */
    /* 'flow' must be the last field. */
};
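
/* Note (an inference from the "last field" requirements above, added for
 * clarity): both 'cls_match' and 'cls_subtable' end in a miniflow-based
 * member because the variable number of u32 values behind a miniflow or
 * minimask is expected to be allocated inline, immediately past the end of
 * the struct. */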

/* A longest-prefix match tree. */
struct trie_node {
    uint32_t prefix;           /* Prefix bits for this node, MSB first. */
    uint8_t n_bits;            /* Never zero, except for the root node. */
    unsigned int n_rules;      /* Number of rules that have this prefix. */
    rcu_trie_ptr edges[2];     /* Both NULL if leaf. */
};
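
/* For illustration (an assumed example, not taken from the classifier code):
 * a node holding the full 8-bit prefix of 10.0.0.0/8 would have
 * prefix = 0x0a000000 (the prefix bits left-aligned, MSB first) and
 * n_bits = 8, and 'n_rules' would count the rules whose prefix ends exactly
 * at this node. */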

/* Max bits per node.  Must fit in struct trie_node's 'prefix'.
 * Also tested with 16, 8, and 5 to stress the implementation. */
#define TRIE_PREFIX_BITS 32
\f
/* flow/miniflow/minimask/minimatch utilities.
 * These are only used by the classifier, so place them here to allow
 * for better optimization. */

static inline uint64_t
miniflow_get_map_in_range(const struct miniflow *miniflow,
                          uint8_t start, uint8_t end, unsigned int *offset)
{
    uint64_t map = miniflow->map;
    *offset = 0;

    if (start > 0) {
        uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
        *offset = count_1bits(map & msk);
        map &= ~msk;
    }
    if (end < FLOW_U32S) {
        uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
        map &= msk;
    }
    return map;
}
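
/* For illustration (assumed values): if 'miniflow->map' has bits 1, 5 and 9
 * set and the requested range is [4, 8), the function returns a map with only
 * bit 5 set and stores 1 in '*offset', because exactly one value (the one for
 * bit 1) precedes the range in the miniflow's packed value array. */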

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * miniflow_hash_in_minimask(), only the form of the arguments differs. */
static inline uint32_t
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
                      uint32_t basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    const uint32_t *p = mask_values;
    uint32_t hash;
    int idx;

    hash = basis;
    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        hash = hash_add(hash, flow_u32[idx] & *p++);
    }

    return hash_finish(hash, (p - mask_values) * 4);
}

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask(), only the form of the arguments differs. */
static inline uint32_t
miniflow_hash_in_minimask(const struct miniflow *flow,
                          const struct minimask *mask, uint32_t basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *p = mask_values;
    uint32_t hash = basis;
    uint32_t flow_u32;

    MINIFLOW_FOR_EACH_IN_MAP(flow_u32, flow, mask->masks.map) {
        hash = hash_add(hash, flow_u32 & *p++);
    }

    return hash_finish(hash, (p - mask_values) * 4);
}
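
/* An illustrative sketch of the equivalence stated above (assuming a caller
 * with a 'flow' and a 'mask', and that the miniflow is built with
 * miniflow_init() from lib/flow.h; not a function of this header):
 *
 *     struct miniflow mf;
 *
 *     miniflow_init(&mf, flow);
 *     ovs_assert(flow_hash_in_minimask(flow, mask, basis)
 *                == miniflow_hash_in_minimask(&mf, mask, basis));
 *     miniflow_destroy(&mf);
 */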

/* Returns a hash value for the bits of range [start, end) in 'flow',
 * where there are 1-bits in 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * minimatch_hash_range(), only the form of the arguments differs. */
static inline uint32_t
flow_hash_in_minimask_range(const struct flow *flow,
                            const struct minimask *mask,
                            uint8_t start, uint8_t end, uint32_t *basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    unsigned int offset;
    uint64_t map;
    const uint32_t *p;
    uint32_t hash = *basis;
    int idx;

    map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
    p = mask_values + offset;
    MAP_FOR_EACH_INDEX(idx, map) {
        hash = hash_add(hash, flow_u32[idx] & *p++);
    }

    *basis = hash; /* Allow continuation from the unfinished value. */
    return hash_finish(hash, (p - mask_values) * 4);
}
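
/* Because '*basis' is left at the unfinished value, consecutive ranges can be
 * hashed one segment at a time.  A sketch of the intended pattern (assumed
 * example values; the boundary 8 is arbitrary):
 *
 *     uint32_t basis = 0;
 *     uint32_t hash;
 *
 *     flow_hash_in_minimask_range(flow, mask, 0, 8, &basis);
 *     hash = flow_hash_in_minimask_range(flow, mask, 8, FLOW_U32S, &basis);
 *
 * Here 'hash' should equal flow_hash_in_minimask(flow, mask, 0), since the
 * two calls together cover the whole map starting from the same basis. */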

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
static inline void
flow_wildcards_fold_minimask(struct flow_wildcards *wc,
                             const struct minimask *mask)
{
    flow_union_with_miniflow(&wc->masks, &mask->masks);
}

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
 * in range [start, end). */
static inline void
flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
                                   const struct minimask *mask,
                                   uint8_t start, uint8_t end)
{
    uint32_t *dst_u32 = (uint32_t *)&wc->masks;
    unsigned int offset;
    uint64_t map;
    const uint32_t *p;
    int idx;

    map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
    p = miniflow_get_u32_values(&mask->masks) + offset;
    MAP_FOR_EACH_INDEX(idx, map) {
        dst_u32[idx] |= *p++;
    }
}
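
/* Note for orientation (an observation, not a requirement stated elsewhere in
 * this header): folding [0, 8) and then [8, FLOW_U32S) leaves 'wc' the same
 * as one flow_wildcards_fold_minimask() call, which is what lets callers
 * un-wildcard only the ranges they actually examined. */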

/* Returns a hash value for 'flow', given 'basis'. */
static inline uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
    const uint32_t *values = miniflow_get_u32_values(flow);
    const uint32_t *p = values;
    uint32_t hash = basis;
    uint64_t hash_map = 0;
    uint64_t map;

    for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
        if (*p) {
            hash = hash_add(hash, *p);
            hash_map |= rightmost_1bit(map);
        }
        p++;
    }
    hash = hash_add(hash, hash_map);
    hash = hash_add(hash, hash_map >> 32);

    return hash_finish(hash, p - values);
}
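
/* Descriptive note: only nonzero u32s contribute to the hash above;
 * 'hash_map' collects the positions of those nonzero values and is mixed in
 * as two 32-bit halves, and the hash is finished with the total number of
 * u32s stored in the miniflow, zero-valued ones included. */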

/* Returns a hash value for 'mask', given 'basis'. */
static inline uint32_t
minimask_hash(const struct minimask *mask, uint32_t basis)
{
    return miniflow_hash(&mask->masks, basis);
}

/* Returns a hash value for 'match', given 'basis'. */
static inline uint32_t
minimatch_hash(const struct minimatch *match, uint32_t basis)
{
    return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis));
}

/* Returns a hash value for the bits of range [start, end) in 'match',
 * given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask_range(), only the form of the arguments differs. */
static inline uint32_t
minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end,
                     uint32_t *basis)
{
    unsigned int offset;
    const uint32_t *p, *q;
    uint32_t hash = *basis;
    int n, i;

    n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
                                              &offset));
    q = miniflow_get_u32_values(&match->mask.masks) + offset;
    p = miniflow_get_u32_values(&match->flow) + offset;

    for (i = 0; i < n; i++) {
        hash = hash_add(hash, p[i] & q[i]);
    }
    *basis = hash; /* Allow continuation from the unfinished value. */
    return hash_finish(hash, (offset + n) * 4);
}
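
/* Note: pairing 'p[i]' with 'q[i]' above assumes the minimatch invariant that
 * 'match->flow' and 'match->mask' are kept with identical maps, so that the
 * two packed value arrays line up u32 for u32 and the result can match
 * flow_hash_in_minimask_range() as documented. */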

#endif