[mirror_ovs.git] / lib / flow.c
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <config.h>
17 #include <sys/types.h>
18 #include "flow.h"
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <limits.h>
22 #include <netinet/in.h>
23 #include <netinet/icmp6.h>
24 #include <netinet/ip6.h>
25 #include <stdint.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include "byte-order.h"
29 #include "colors.h"
30 #include "coverage.h"
31 #include "csum.h"
32 #include "openvswitch/dynamic-string.h"
33 #include "hash.h"
34 #include "jhash.h"
35 #include "openvswitch/match.h"
36 #include "dp-packet.h"
37 #include "openflow/openflow.h"
38 #include "packets.h"
39 #include "odp-util.h"
40 #include "random.h"
41 #include "unaligned.h"
42
43 COVERAGE_DEFINE(flow_extract);
44 COVERAGE_DEFINE(miniflow_malloc);
45
46 /* U64 indices for segmented flow classification. */
47 const uint8_t flow_segment_u64s[4] = {
48 FLOW_SEGMENT_1_ENDS_AT / sizeof(uint64_t),
49 FLOW_SEGMENT_2_ENDS_AT / sizeof(uint64_t),
50 FLOW_SEGMENT_3_ENDS_AT / sizeof(uint64_t),
51 FLOW_U64S
52 };
53
54 /* Asserts that field 'f1' follows immediately after 'f0' in struct flow,
55 * without any intervening padding. */
56 #define ASSERT_SEQUENTIAL(f0, f1) \
57 BUILD_ASSERT_DECL(offsetof(struct flow, f0) \
58 + MEMBER_SIZEOF(struct flow, f0) \
59 == offsetof(struct flow, f1))
60
61 /* Asserts that fields 'f0' and 'f1' are in the same 32-bit aligned word within
62 * struct flow. */
63 #define ASSERT_SAME_WORD(f0, f1) \
64 BUILD_ASSERT_DECL(offsetof(struct flow, f0) / 4 \
65 == offsetof(struct flow, f1) / 4)
66
67 /* Asserts that 'f0' and 'f1' are both sequential and within the same 32-bit
68 * aligned word in struct flow. */
69 #define ASSERT_SEQUENTIAL_SAME_WORD(f0, f1) \
70 ASSERT_SEQUENTIAL(f0, f1); \
71 ASSERT_SAME_WORD(f0, f1)
72
73 /* miniflow_extract() assumes the following to be true to optimize the
74 * extraction process. */
75 ASSERT_SEQUENTIAL_SAME_WORD(dl_type, vlan_tci);
76
77 ASSERT_SEQUENTIAL_SAME_WORD(nw_frag, nw_tos);
78 ASSERT_SEQUENTIAL_SAME_WORD(nw_tos, nw_ttl);
79 ASSERT_SEQUENTIAL_SAME_WORD(nw_ttl, nw_proto);
80
81 /* TCP flags in the middle of a BE64, zeroes in the other half. */
82 BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) % 8 == 4);
83
84 #if WORDS_BIGENDIAN
85 #define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
86 << 16)
87 #else
88 #define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
89 #endif
90
91 ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
92
93 /* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
94  * must contain at least 'size' bytes of data.  Returns a pointer to the first
95  * byte of data removed. */
96 static inline const void *
97 data_pull(const void **datap, size_t *sizep, size_t size)
98 {
99 const char *data = *datap;
100 *datap = data + size;
101 *sizep -= size;
102 return data;
103 }
104
105 /* If '*datap' has at least 'size' bytes of data, removes that many bytes from
106  * the head end of '*datap' and returns a pointer to the first byte removed.
107  * Otherwise, returns a null pointer without modifying '*datap'. */
108 static inline const void *
109 data_try_pull(const void **datap, size_t *sizep, size_t size)
110 {
111 return OVS_LIKELY(*sizep >= size) ? data_pull(datap, sizep, size) : NULL;
112 }
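/* Usage sketch (illustrative only, not part of the original file): a parser
 * typically walks a packet by pulling one fixed-size header at a time from a
 * 'data'/'size' pair that tracks the unparsed remainder, e.g.:
 *
 *     const struct udp_header *udp = data_try_pull(&data, &size, sizeof *udp);
 *     if (!udp) {
 *         return;                   (truncated packet; nothing was consumed)
 *     }
 *     ('data' now points just past the UDP header and 'size' has shrunk.)
 */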
113
114 /* Context for pushing data to a miniflow. */
115 struct mf_ctx {
116 struct flowmap map;
117 uint64_t *data;
118 uint64_t * const end;
119 };
120
121 /* miniflow_push_* macros allow filling in miniflow data values in order.
122  * Assertions are needed only when the layout of the struct flow is modified.
123  * 'ofs' is a compile-time constant, which allows most of the code to be
124  * optimized away.  Some GCC versions gave warnings on ALWAYS_INLINE, so these
125  * are defined as macros. */
126
127 #if (FLOW_WC_SEQ != 35)
128 #define MINIFLOW_ASSERT(X) ovs_assert(X)
129 BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
130 "assertions enabled. Consider updating FLOW_WC_SEQ after "
131 "testing")
132 #else
133 #define MINIFLOW_ASSERT(X)
134 #endif
135
136 /* Asserts that 'IDX' and higher bits are not set. */
137 #define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
138 { \
139 MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
140 (MAP_MAX << ((IDX) % MAP_T_BITS)))); \
141 for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
142 MINIFLOW_ASSERT(!(FM)->bits[i]); \
143 } \
144 }
145
146 #define miniflow_set_map(MF, OFS) \
147 { \
148 ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS)); \
149 flowmap_set(&MF.map, (OFS), 1); \
150 }
151
152 #define miniflow_assert_in_map(MF, OFS) \
153 MINIFLOW_ASSERT(flowmap_is_set(&MF.map, (OFS))); \
154 ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
155
156 #define miniflow_push_uint64_(MF, OFS, VALUE) \
157 { \
158 MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0); \
159 *MF.data++ = VALUE; \
160 miniflow_set_map(MF, OFS / 8); \
161 }
162
163 #define miniflow_push_be64_(MF, OFS, VALUE) \
164 miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
165
166 #define miniflow_push_uint32_(MF, OFS, VALUE) \
167 { \
168 MINIFLOW_ASSERT(MF.data < MF.end); \
169 \
170 if ((OFS) % 8 == 0) { \
171 miniflow_set_map(MF, OFS / 8); \
172 *(uint32_t *)MF.data = VALUE; \
173 } else if ((OFS) % 8 == 4) { \
174 miniflow_assert_in_map(MF, OFS / 8); \
175 *((uint32_t *)MF.data + 1) = VALUE; \
176 MF.data++; \
177 } \
178 }
179
180 #define miniflow_push_be32_(MF, OFS, VALUE) \
181 miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
182
183 #define miniflow_push_uint16_(MF, OFS, VALUE) \
184 { \
185 MINIFLOW_ASSERT(MF.data < MF.end); \
186 \
187 if ((OFS) % 8 == 0) { \
188 miniflow_set_map(MF, OFS / 8); \
189 *(uint16_t *)MF.data = VALUE; \
190 } else if ((OFS) % 8 == 2) { \
191 miniflow_assert_in_map(MF, OFS / 8); \
192 *((uint16_t *)MF.data + 1) = VALUE; \
193 } else if ((OFS) % 8 == 4) { \
194 miniflow_assert_in_map(MF, OFS / 8); \
195 *((uint16_t *)MF.data + 2) = VALUE; \
196 } else if ((OFS) % 8 == 6) { \
197 miniflow_assert_in_map(MF, OFS / 8); \
198 *((uint16_t *)MF.data + 3) = VALUE; \
199 MF.data++; \
200 } \
201 }
202
203 #define miniflow_push_uint8_(MF, OFS, VALUE) \
204 { \
205 MINIFLOW_ASSERT(MF.data < MF.end); \
206 \
207 if ((OFS) % 8 == 0) { \
208 miniflow_set_map(MF, OFS / 8); \
209 *(uint8_t *)MF.data = VALUE; \
210 } else if ((OFS) % 8 == 7) { \
211 miniflow_assert_in_map(MF, OFS / 8); \
212 *((uint8_t *)MF.data + 7) = VALUE; \
213 MF.data++; \
214 } else { \
215 miniflow_assert_in_map(MF, OFS / 8); \
216 *((uint8_t *)MF.data + ((OFS) % 8)) = VALUE; \
217 } \
218 }
219
220 #define miniflow_pad_to_64_(MF, OFS) \
221 { \
222 MINIFLOW_ASSERT((OFS) % 8 != 0); \
223 miniflow_assert_in_map(MF, OFS / 8); \
224 \
225 memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
226 MF.data++; \
227 }
228
229 #define miniflow_pad_from_64_(MF, OFS) \
230 { \
231 MINIFLOW_ASSERT(MF.data < MF.end); \
232 \
233 MINIFLOW_ASSERT((OFS) % 8 != 0); \
234 miniflow_set_map(MF, OFS / 8); \
235 \
236 memset((uint8_t *)MF.data, 0, (OFS) % 8); \
237 }
238
239 #define miniflow_push_be16_(MF, OFS, VALUE) \
240 miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
241
242 #define miniflow_push_be8_(MF, OFS, VALUE) \
243 miniflow_push_uint8_(MF, OFS, (OVS_FORCE uint8_t)VALUE);
244
245 #define miniflow_set_maps(MF, OFS, N_WORDS) \
246 { \
247 size_t ofs = (OFS); \
248 size_t n_words = (N_WORDS); \
249 \
250 MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end); \
251 ASSERT_FLOWMAP_NOT_SET(&MF.map, ofs); \
252 flowmap_set(&MF.map, ofs, n_words); \
253 }
254
255 /* Data at 'valuep' may be unaligned. */
256 #define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS) \
257 { \
258 MINIFLOW_ASSERT((OFS) % 8 == 0); \
259 miniflow_set_maps(MF, (OFS) / 8, (N_WORDS)); \
260 memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data); \
261 MF.data += (N_WORDS); \
262 }
263
264 /* Push 32-bit words padded to 64-bits. */
265 #define miniflow_push_words_32_(MF, OFS, VALUEP, N_WORDS) \
266 { \
267 miniflow_set_maps(MF, (OFS) / 8, DIV_ROUND_UP(N_WORDS, 2)); \
268 memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof(uint32_t)); \
269 MF.data += DIV_ROUND_UP(N_WORDS, 2); \
270 if ((N_WORDS) & 1) { \
271 *((uint32_t *)MF.data - 1) = 0; \
272 } \
273 }
274
275 /* Data at 'valuep' may be unaligned. */
276 /* MACs start 64-aligned, and must be followed by other data or padding. */
277 #define miniflow_push_macs_(MF, OFS, VALUEP) \
278 { \
279 miniflow_set_maps(MF, (OFS) / 8, 2); \
280 memcpy(MF.data, (VALUEP), 2 * ETH_ADDR_LEN); \
281 MF.data += 1; /* First word only. */ \
282 }
283
284 #define miniflow_push_uint32(MF, FIELD, VALUE) \
285 miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)
286
287 #define miniflow_push_be32(MF, FIELD, VALUE) \
288 miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)
289
290 #define miniflow_push_uint16(MF, FIELD, VALUE) \
291 miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)
292
293 #define miniflow_push_be16(MF, FIELD, VALUE) \
294 miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)
295
296 #define miniflow_push_uint8(MF, FIELD, VALUE) \
297 miniflow_push_uint8_(MF, offsetof(struct flow, FIELD), VALUE)
298
299 #define miniflow_pad_to_64(MF, FIELD) \
300 miniflow_pad_to_64_(MF, OFFSETOFEND(struct flow, FIELD))
301
302 #define miniflow_pad_from_64(MF, FIELD) \
303 miniflow_pad_from_64_(MF, offsetof(struct flow, FIELD))
304
305 #define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS) \
306 miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
307
308 #define miniflow_push_words_32(MF, FIELD, VALUEP, N_WORDS) \
309 miniflow_push_words_32_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
310
311 #define miniflow_push_macs(MF, FIELD, VALUEP) \
312 miniflow_push_macs_(MF, offsetof(struct flow, FIELD), VALUEP)
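/* Usage sketch (illustrative only): the push macros are invoked in struct
 * flow field order, roughly as miniflow_extract() below does for the link
 * layer:
 *
 *     miniflow_push_macs(mf, dl_dst, data);        dl_dst + dl_src, 12 bytes
 *     miniflow_push_be16(mf, dl_type, dl_type);    bytes 4..5 of the 2nd word
 *     miniflow_push_be16(mf, vlan_tci, vlan_tci);  bytes 6..7, word complete
 *
 * Each push also records the corresponding 64-bit word in mf.map, so the
 * resulting miniflow stores only the words that were actually pushed. */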
313
314 /* Pulls the MPLS headers at '*datap' and returns the count of them. */
315 static inline int
316 parse_mpls(const void **datap, size_t *sizep)
317 {
318 const struct mpls_hdr *mh;
319 int count = 0;
320
321 while ((mh = data_try_pull(datap, sizep, sizeof *mh))) {
322 count++;
323 if (mh->mpls_lse.lo & htons(1 << MPLS_BOS_SHIFT)) {
324 break;
325 }
326 }
327 return MIN(count, FLOW_MAX_MPLS_LABELS);
328 }
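/* For example (illustrative): given a label stack of two LSEs where only the
 * second one has the bottom-of-stack bit set, parse_mpls() consumes both
 * 4-byte entries, leaves '*datap' pointing just past the stack, and returns 2
 * (the count is capped at FLOW_MAX_MPLS_LABELS). */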
329
330 static inline ovs_be16
331 parse_vlan(const void **datap, size_t *sizep)
332 {
333 const struct eth_header *eth = *datap;
334
335 struct qtag_prefix {
336 ovs_be16 eth_type; /* ETH_TYPE_VLAN */
337 ovs_be16 tci;
338 };
339
340 data_pull(datap, sizep, ETH_ADDR_LEN * 2);
341
342 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
343 if (OVS_LIKELY(*sizep
344 >= sizeof(struct qtag_prefix) + sizeof(ovs_be16))) {
345 const struct qtag_prefix *qp = data_pull(datap, sizep, sizeof *qp);
346 return qp->tci | htons(VLAN_CFI);
347 }
348 }
349 return 0;
350 }
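/* For example (illustrative): for a frame tagged with TPID 0x8100 and TCI
 * 0x0064 (PCP 0, VID 100), parse_vlan() consumes the Ethernet addresses plus
 * the 4-byte 802.1Q tag and returns htons(0x1064), i.e. the TCI with VLAN_CFI
 * set; for an untagged frame it consumes only the addresses and returns 0. */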
351
352 static inline ovs_be16
353 parse_ethertype(const void **datap, size_t *sizep)
354 {
355 const struct llc_snap_header *llc;
356 ovs_be16 proto;
357
358 proto = *(ovs_be16 *) data_pull(datap, sizep, sizeof proto);
359 if (OVS_LIKELY(ntohs(proto) >= ETH_TYPE_MIN)) {
360 return proto;
361 }
362
363 if (OVS_UNLIKELY(*sizep < sizeof *llc)) {
364 return htons(FLOW_DL_TYPE_NONE);
365 }
366
367 llc = *datap;
368 if (OVS_UNLIKELY(llc->llc.llc_dsap != LLC_DSAP_SNAP
369 || llc->llc.llc_ssap != LLC_SSAP_SNAP
370 || llc->llc.llc_cntl != LLC_CNTL_SNAP
371 || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
372 sizeof llc->snap.snap_org))) {
373 return htons(FLOW_DL_TYPE_NONE);
374 }
375
376 data_pull(datap, sizep, sizeof *llc);
377
378 if (OVS_LIKELY(ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN)) {
379 return llc->snap.snap_type;
380 }
381
382 return htons(FLOW_DL_TYPE_NONE);
383 }
384
385 static inline void
386 parse_icmpv6(const void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
387 const struct in6_addr **nd_target,
388 struct eth_addr arp_buf[2])
389 {
390 if (icmp->icmp6_code == 0 &&
391 (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
392 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
393
394 *nd_target = data_try_pull(datap, sizep, sizeof **nd_target);
395 if (OVS_UNLIKELY(!*nd_target)) {
396 return;
397 }
398
399 while (*sizep >= 8) {
400 /* The minimum size of an option is 8 bytes, which also is
401 * the size of Ethernet link-layer options. */
402 const struct ovs_nd_opt *nd_opt = *datap;
403 int opt_len = nd_opt->nd_opt_len * ND_OPT_LEN;
404
405 if (!opt_len || opt_len > *sizep) {
406 return;
407 }
408
409 /* Store the link layer address if the appropriate option is
410 * provided. It is considered an error if the same link
411 * layer option is specified twice. */
412 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
413 && opt_len == 8) {
414 if (OVS_LIKELY(eth_addr_is_zero(arp_buf[0]))) {
415 arp_buf[0] = nd_opt->nd_opt_mac;
416 } else {
417 goto invalid;
418 }
419 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
420 && opt_len == 8) {
421 if (OVS_LIKELY(eth_addr_is_zero(arp_buf[1]))) {
422 arp_buf[1] = nd_opt->nd_opt_mac;
423 } else {
424 goto invalid;
425 }
426 }
427
428 if (OVS_UNLIKELY(!data_try_pull(datap, sizep, opt_len))) {
429 return;
430 }
431 }
432 }
433
434 return;
435
436 invalid:
437 *nd_target = NULL;
438 arp_buf[0] = eth_addr_zero;
439 arp_buf[1] = eth_addr_zero;
440 }
441
442 /* Initializes 'flow' members from 'packet', including metadata from 'packet->md'.
443 *
444 * Initializes 'packet' header l2 pointer to the start of the Ethernet
445 * header, and the layer offsets as follows:
446 *
447 * - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
448 * when there is no MPLS shim header.
449 *
450 * - packet->l3_ofs to just past the Ethernet header, or just past the
451 * vlan_header if one is present, to the first byte of the payload of the
452 * Ethernet frame. UINT16_MAX if the frame is too short to contain an
453 * Ethernet header.
454 *
455 * - packet->l4_ofs to just past the IPv4 header, if one is present and
456 * has at least the content used for the fields of interest for the flow,
457 * otherwise UINT16_MAX.
458 */
459 void
460 flow_extract(struct dp_packet *packet, struct flow *flow)
461 {
462 struct {
463 struct miniflow mf;
464 uint64_t buf[FLOW_U64S];
465 } m;
466
467 COVERAGE_INC(flow_extract);
468
469 miniflow_extract(packet, &m.mf);
470 miniflow_expand(&m.mf, flow);
471 }
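/* Usage sketch (illustrative only): a caller that needs the fully expanded
 * struct flow for a received packet might do:
 *
 *     struct flow flow;
 *
 *     flow_extract(packet, &flow);
 *     if (flow.dl_type == htons(ETH_TYPE_IP) && flow.nw_proto == IPPROTO_TCP) {
 *         (flow.tp_src and flow.tp_dst now hold the TCP ports)
 *     }
 */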
472
473 /* Caller is responsible for initializing 'dst' with enough storage for
474 * FLOW_U64S * 8 bytes. */
475 void
476 miniflow_extract(struct dp_packet *packet, struct miniflow *dst)
477 {
478 const struct pkt_metadata *md = &packet->md;
479 const void *data = dp_packet_data(packet);
480 size_t size = dp_packet_size(packet);
481 uint64_t *values = miniflow_values(dst);
482 struct mf_ctx mf = { FLOWMAP_EMPTY_INITIALIZER, values,
483 values + FLOW_U64S };
484 const char *l2;
485 ovs_be16 dl_type;
486 uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
487
488 /* Metadata. */
489 if (flow_tnl_dst_is_set(&md->tunnel)) {
490 miniflow_push_words(mf, tunnel, &md->tunnel,
491 offsetof(struct flow_tnl, metadata) /
492 sizeof(uint64_t));
493
494 if (!(md->tunnel.flags & FLOW_TNL_F_UDPIF)) {
495 if (md->tunnel.metadata.present.map) {
496 miniflow_push_words(mf, tunnel.metadata, &md->tunnel.metadata,
497 sizeof md->tunnel.metadata /
498 sizeof(uint64_t));
499 }
500 } else {
501 if (md->tunnel.metadata.present.len) {
502 miniflow_push_words(mf, tunnel.metadata.present,
503 &md->tunnel.metadata.present, 1);
504 miniflow_push_words(mf, tunnel.metadata.opts.gnv,
505 md->tunnel.metadata.opts.gnv,
506 DIV_ROUND_UP(md->tunnel.metadata.present.len,
507 sizeof(uint64_t)));
508 }
509 }
510 }
511 if (md->skb_priority || md->pkt_mark) {
512 miniflow_push_uint32(mf, skb_priority, md->skb_priority);
513 miniflow_push_uint32(mf, pkt_mark, md->pkt_mark);
514 }
515 miniflow_push_uint32(mf, dp_hash, md->dp_hash);
516 miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));
517 if (md->recirc_id || md->ct_state) {
518 miniflow_push_uint32(mf, recirc_id, md->recirc_id);
519 miniflow_push_uint16(mf, ct_state, md->ct_state);
520 miniflow_push_uint16(mf, ct_zone, md->ct_zone);
521 }
522
523 if (md->ct_state) {
524 miniflow_push_uint32(mf, ct_mark, md->ct_mark);
525 miniflow_pad_to_64(mf, ct_mark);
526
527 if (!ovs_u128_is_zero(md->ct_label)) {
528 miniflow_push_words(mf, ct_label, &md->ct_label,
529 sizeof md->ct_label / sizeof(uint64_t));
530 }
531 }
532
533 /* Initialize packet's layer pointer and offsets. */
534 l2 = data;
535 dp_packet_reset_offsets(packet);
536
537 /* Must have full Ethernet header to proceed. */
538 if (OVS_UNLIKELY(size < sizeof(struct eth_header))) {
539 goto out;
540 } else {
541 ovs_be16 vlan_tci;
542
543 /* Link layer. */
544 ASSERT_SEQUENTIAL(dl_dst, dl_src);
545 miniflow_push_macs(mf, dl_dst, data);
546 /* dl_type, vlan_tci. */
547 vlan_tci = parse_vlan(&data, &size);
548 dl_type = parse_ethertype(&data, &size);
549 miniflow_push_be16(mf, dl_type, dl_type);
550 miniflow_push_be16(mf, vlan_tci, vlan_tci);
551 }
552
553 /* Parse mpls. */
554 if (OVS_UNLIKELY(eth_type_mpls(dl_type))) {
555 int count;
556 const void *mpls = data;
557
558 packet->l2_5_ofs = (char *)data - l2;
559 count = parse_mpls(&data, &size);
560 miniflow_push_words_32(mf, mpls_lse, mpls, count);
561 }
562
563 /* Network layer. */
564 packet->l3_ofs = (char *)data - l2;
565
566 nw_frag = 0;
567 if (OVS_LIKELY(dl_type == htons(ETH_TYPE_IP))) {
568 const struct ip_header *nh = data;
569 int ip_len;
570 uint16_t tot_len;
571
572 if (OVS_UNLIKELY(size < IP_HEADER_LEN)) {
573 goto out;
574 }
575 ip_len = IP_IHL(nh->ip_ihl_ver) * 4;
576
577 if (OVS_UNLIKELY(ip_len < IP_HEADER_LEN)) {
578 goto out;
579 }
580 if (OVS_UNLIKELY(size < ip_len)) {
581 goto out;
582 }
583 tot_len = ntohs(nh->ip_tot_len);
584 if (OVS_UNLIKELY(tot_len > size)) {
585 goto out;
586 }
587 if (OVS_UNLIKELY(size - tot_len > UINT8_MAX)) {
588 goto out;
589 }
590 dp_packet_set_l2_pad_size(packet, size - tot_len);
591 size = tot_len; /* Never pull padding. */
592
593 /* Push both source and destination address at once. */
594 miniflow_push_words(mf, nw_src, &nh->ip_src, 1);
595
596 miniflow_push_be32(mf, ipv6_label, 0); /* Padding for IPv4. */
597
598 nw_tos = nh->ip_tos;
599 nw_ttl = nh->ip_ttl;
600 nw_proto = nh->ip_proto;
601 if (OVS_UNLIKELY(IP_IS_FRAGMENT(nh->ip_frag_off))) {
602 nw_frag = FLOW_NW_FRAG_ANY;
603 if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
604 nw_frag |= FLOW_NW_FRAG_LATER;
605 }
606 }
607 data_pull(&data, &size, ip_len);
608 } else if (dl_type == htons(ETH_TYPE_IPV6)) {
609 const struct ovs_16aligned_ip6_hdr *nh;
610 ovs_be32 tc_flow;
611 uint16_t plen;
612
613 if (OVS_UNLIKELY(size < sizeof *nh)) {
614 goto out;
615 }
616 nh = data_pull(&data, &size, sizeof *nh);
617
618 plen = ntohs(nh->ip6_plen);
619 if (OVS_UNLIKELY(plen > size)) {
620 goto out;
621 }
622 /* Jumbo Payload option not supported yet. */
623 if (OVS_UNLIKELY(size - plen > UINT8_MAX)) {
624 goto out;
625 }
626 dp_packet_set_l2_pad_size(packet, size - plen);
627 size = plen; /* Never pull padding. */
628
629 miniflow_push_words(mf, ipv6_src, &nh->ip6_src,
630 sizeof nh->ip6_src / 8);
631 miniflow_push_words(mf, ipv6_dst, &nh->ip6_dst,
632 sizeof nh->ip6_dst / 8);
633
634 tc_flow = get_16aligned_be32(&nh->ip6_flow);
635 {
636 ovs_be32 label = tc_flow & htonl(IPV6_LABEL_MASK);
637 miniflow_push_be32(mf, ipv6_label, label);
638 }
639
640 nw_tos = ntohl(tc_flow) >> 20;
641 nw_ttl = nh->ip6_hlim;
642 nw_proto = nh->ip6_nxt;
643
644 while (1) {
645 if (OVS_LIKELY((nw_proto != IPPROTO_HOPOPTS)
646 && (nw_proto != IPPROTO_ROUTING)
647 && (nw_proto != IPPROTO_DSTOPTS)
648 && (nw_proto != IPPROTO_AH)
649 && (nw_proto != IPPROTO_FRAGMENT))) {
650 /* It's either a terminal header (e.g., TCP, UDP) or one we
651 * don't understand. In either case, we're done with the
652 * packet, so use it to fill in 'nw_proto'. */
653 break;
654 }
655
656 /* We only verify that at least 8 bytes of the next header are
657 * available, but many of these headers are longer. Ensure that
658 * accesses within the extension header are within those first 8
659 * bytes. All extension headers are required to be at least 8
660 * bytes. */
661 if (OVS_UNLIKELY(size < 8)) {
662 goto out;
663 }
664
665 if ((nw_proto == IPPROTO_HOPOPTS)
666 || (nw_proto == IPPROTO_ROUTING)
667 || (nw_proto == IPPROTO_DSTOPTS)) {
668 /* These headers, while different, have the fields we care
669 * about in the same location and with the same
670 * interpretation. */
671 const struct ip6_ext *ext_hdr = data;
672 nw_proto = ext_hdr->ip6e_nxt;
673 if (OVS_UNLIKELY(!data_try_pull(&data, &size,
674 (ext_hdr->ip6e_len + 1) * 8))) {
675 goto out;
676 }
677 } else if (nw_proto == IPPROTO_AH) {
678 /* A standard AH definition isn't available, but the fields
679 * we care about are in the same location as the generic
680 * option header--only the header length is calculated
681 * differently. */
682 const struct ip6_ext *ext_hdr = data;
683 nw_proto = ext_hdr->ip6e_nxt;
684 if (OVS_UNLIKELY(!data_try_pull(&data, &size,
685 (ext_hdr->ip6e_len + 2) * 4))) {
686 goto out;
687 }
688 } else if (nw_proto == IPPROTO_FRAGMENT) {
689 const struct ovs_16aligned_ip6_frag *frag_hdr = data;
690
691 nw_proto = frag_hdr->ip6f_nxt;
692 if (!data_try_pull(&data, &size, sizeof *frag_hdr)) {
693 goto out;
694 }
695
696 /* We only process the first fragment. */
697 if (frag_hdr->ip6f_offlg != htons(0)) {
698 nw_frag = FLOW_NW_FRAG_ANY;
699 if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
700 nw_frag |= FLOW_NW_FRAG_LATER;
701 nw_proto = IPPROTO_FRAGMENT;
702 break;
703 }
704 }
705 }
706 }
707 } else {
708 if (dl_type == htons(ETH_TYPE_ARP) ||
709 dl_type == htons(ETH_TYPE_RARP)) {
710 struct eth_addr arp_buf[2];
711 const struct arp_eth_header *arp = (const struct arp_eth_header *)
712 data_try_pull(&data, &size, ARP_ETH_HEADER_LEN);
713
714 if (OVS_LIKELY(arp) && OVS_LIKELY(arp->ar_hrd == htons(1))
715 && OVS_LIKELY(arp->ar_pro == htons(ETH_TYPE_IP))
716 && OVS_LIKELY(arp->ar_hln == ETH_ADDR_LEN)
717 && OVS_LIKELY(arp->ar_pln == 4)) {
718 miniflow_push_be32(mf, nw_src,
719 get_16aligned_be32(&arp->ar_spa));
720 miniflow_push_be32(mf, nw_dst,
721 get_16aligned_be32(&arp->ar_tpa));
722
723 /* We only match on the lower 8 bits of the opcode. */
724 if (OVS_LIKELY(ntohs(arp->ar_op) <= 0xff)) {
725 miniflow_push_be32(mf, ipv6_label, 0); /* Pad with ARP. */
726 miniflow_push_be32(mf, nw_frag, htonl(ntohs(arp->ar_op)));
727 }
728
729 /* Must be adjacent. */
730 ASSERT_SEQUENTIAL(arp_sha, arp_tha);
731
732 arp_buf[0] = arp->ar_sha;
733 arp_buf[1] = arp->ar_tha;
734 miniflow_push_macs(mf, arp_sha, arp_buf);
735 miniflow_pad_to_64(mf, arp_tha);
736 }
737 }
738 goto out;
739 }
740
741 packet->l4_ofs = (char *)data - l2;
742 miniflow_push_be32(mf, nw_frag,
743 BYTES_TO_BE32(nw_frag, nw_tos, nw_ttl, nw_proto));
744
745 if (OVS_LIKELY(!(nw_frag & FLOW_NW_FRAG_LATER))) {
746 if (OVS_LIKELY(nw_proto == IPPROTO_TCP)) {
747 if (OVS_LIKELY(size >= TCP_HEADER_LEN)) {
748 const struct tcp_header *tcp = data;
749
750 miniflow_push_be32(mf, arp_tha.ea[2], 0);
751 miniflow_push_be32(mf, tcp_flags,
752 TCP_FLAGS_BE32(tcp->tcp_ctl));
753 miniflow_push_be16(mf, tp_src, tcp->tcp_src);
754 miniflow_push_be16(mf, tp_dst, tcp->tcp_dst);
755 miniflow_pad_to_64(mf, tp_dst);
756 }
757 } else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
758 if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
759 const struct udp_header *udp = data;
760
761 miniflow_push_be16(mf, tp_src, udp->udp_src);
762 miniflow_push_be16(mf, tp_dst, udp->udp_dst);
763 miniflow_pad_to_64(mf, tp_dst);
764 }
765 } else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
766 if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
767 const struct sctp_header *sctp = data;
768
769 miniflow_push_be16(mf, tp_src, sctp->sctp_src);
770 miniflow_push_be16(mf, tp_dst, sctp->sctp_dst);
771 miniflow_pad_to_64(mf, tp_dst);
772 }
773 } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
774 if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
775 const struct icmp_header *icmp = data;
776
777 miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
778 miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
779 miniflow_pad_to_64(mf, tp_dst);
780 }
781 } else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
782 if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
783 const struct igmp_header *igmp = data;
784
785 miniflow_push_be16(mf, tp_src, htons(igmp->igmp_type));
786 miniflow_push_be16(mf, tp_dst, htons(igmp->igmp_code));
787 miniflow_push_be32(mf, igmp_group_ip4,
788 get_16aligned_be32(&igmp->group));
789 }
790 } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
791 if (OVS_LIKELY(size >= sizeof(struct icmp6_hdr))) {
792 const struct in6_addr *nd_target = NULL;
793 struct eth_addr arp_buf[2] = { { { { 0 } } } };
794 const struct icmp6_hdr *icmp = data_pull(&data, &size,
795 sizeof *icmp);
796 parse_icmpv6(&data, &size, icmp, &nd_target, arp_buf);
797 if (nd_target) {
798 miniflow_push_words(mf, nd_target, nd_target,
799 sizeof *nd_target / sizeof(uint64_t));
800 }
801 miniflow_push_macs(mf, arp_sha, arp_buf);
802 miniflow_pad_to_64(mf, arp_tha);
803 miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
804 miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
805 miniflow_pad_to_64(mf, tp_dst);
806 }
807 }
808 }
809 out:
810 dst->map = mf.map;
811 }
812
813 /* For every bit of a field that is wildcarded in 'wildcards', sets the
814 * corresponding bit in 'flow' to zero. */
815 void
816 flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
817 {
818 uint64_t *flow_u64 = (uint64_t *) flow;
819 const uint64_t *wc_u64 = (const uint64_t *) &wildcards->masks;
820 size_t i;
821
822 for (i = 0; i < FLOW_U64S; i++) {
823 flow_u64[i] &= wc_u64[i];
824 }
825 }
826
827 void
828 flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
829 {
830 if (flow->nw_proto != IPPROTO_ICMP) {
831 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
832 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
833 } else {
834 wc->masks.tp_src = htons(0xff);
835 wc->masks.tp_dst = htons(0xff);
836 }
837 }
838
839 /* Initializes 'flow_metadata' with the metadata found in 'flow'. */
840 void
841 flow_get_metadata(const struct flow *flow, struct match *flow_metadata)
842 {
843 int i;
844
845 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
846
847 match_init_catchall(flow_metadata);
848 if (flow->tunnel.tun_id != htonll(0)) {
849 match_set_tun_id(flow_metadata, flow->tunnel.tun_id);
850 }
851 if (flow->tunnel.flags & FLOW_TNL_PUB_F_MASK) {
852 match_set_tun_flags(flow_metadata,
853 flow->tunnel.flags & FLOW_TNL_PUB_F_MASK);
854 }
855 if (flow->tunnel.ip_src) {
856 match_set_tun_src(flow_metadata, flow->tunnel.ip_src);
857 }
858 if (flow->tunnel.ip_dst) {
859 match_set_tun_dst(flow_metadata, flow->tunnel.ip_dst);
860 }
861 if (ipv6_addr_is_set(&flow->tunnel.ipv6_src)) {
862 match_set_tun_ipv6_src(flow_metadata, &flow->tunnel.ipv6_src);
863 }
864 if (ipv6_addr_is_set(&flow->tunnel.ipv6_dst)) {
865 match_set_tun_ipv6_dst(flow_metadata, &flow->tunnel.ipv6_dst);
866 }
867 if (flow->tunnel.gbp_id != htons(0)) {
868 match_set_tun_gbp_id(flow_metadata, flow->tunnel.gbp_id);
869 }
870 if (flow->tunnel.gbp_flags) {
871 match_set_tun_gbp_flags(flow_metadata, flow->tunnel.gbp_flags);
872 }
873 tun_metadata_get_fmd(&flow->tunnel, flow_metadata);
874 if (flow->metadata != htonll(0)) {
875 match_set_metadata(flow_metadata, flow->metadata);
876 }
877
878 for (i = 0; i < FLOW_N_REGS; i++) {
879 if (flow->regs[i]) {
880 match_set_reg(flow_metadata, i, flow->regs[i]);
881 }
882 }
883
884 if (flow->pkt_mark != 0) {
885 match_set_pkt_mark(flow_metadata, flow->pkt_mark);
886 }
887
888 match_set_in_port(flow_metadata, flow->in_port.ofp_port);
889 if (flow->ct_state != 0) {
890 match_set_ct_state(flow_metadata, flow->ct_state);
891 }
892 if (flow->ct_zone != 0) {
893 match_set_ct_zone(flow_metadata, flow->ct_zone);
894 }
895 if (flow->ct_mark != 0) {
896 match_set_ct_mark(flow_metadata, flow->ct_mark);
897 }
898 if (!ovs_u128_is_zero(flow->ct_label)) {
899 match_set_ct_label(flow_metadata, flow->ct_label);
900 }
901 }
902
903 const char *ct_state_to_string(uint32_t state)
904 {
905 switch (state) {
906 case CS_REPLY_DIR:
907 return "rpl";
908 case CS_TRACKED:
909 return "trk";
910 case CS_NEW:
911 return "new";
912 case CS_ESTABLISHED:
913 return "est";
914 case CS_RELATED:
915 return "rel";
916 case CS_INVALID:
917 return "inv";
918 case CS_SRC_NAT:
919 return "snat";
920 case CS_DST_NAT:
921 return "dnat";
922 default:
923 return NULL;
924 }
925 }
926
927 char *
928 flow_to_string(const struct flow *flow)
929 {
930 struct ds ds = DS_EMPTY_INITIALIZER;
931 flow_format(&ds, flow);
932 return ds_cstr(&ds);
933 }
934
935 const char *
936 flow_tun_flag_to_string(uint32_t flags)
937 {
938 switch (flags) {
939 case FLOW_TNL_F_DONT_FRAGMENT:
940 return "df";
941 case FLOW_TNL_F_CSUM:
942 return "csum";
943 case FLOW_TNL_F_KEY:
944 return "key";
945 case FLOW_TNL_F_OAM:
946 return "oam";
947 default:
948 return NULL;
949 }
950 }
951
952 void
953 format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
954 uint32_t flags, char del)
955 {
956 uint32_t bad = 0;
957
958 if (!flags) {
959 ds_put_char(ds, '0');
960 return;
961 }
962 while (flags) {
963 uint32_t bit = rightmost_1bit(flags);
964 const char *s;
965
966 s = bit_to_string(bit);
967 if (s) {
968 ds_put_format(ds, "%s%c", s, del);
969 } else {
970 bad |= bit;
971 }
972
973 flags &= ~bit;
974 }
975
976 if (bad) {
977 ds_put_format(ds, "0x%"PRIx32"%c", bad, del);
978 }
979 ds_chomp(ds, del);
980 }
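/* For example (illustrative): with 'bit_to_string' set to
 * flow_tun_flag_to_string() above, flags FLOW_TNL_F_DONT_FRAGMENT |
 * FLOW_TNL_F_CSUM and del '|' produce the names "df" and "csum" joined by
 * '|', lowest set bit first; any bit without a name is appended in hex. */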
981
982 void
983 format_flags_masked(struct ds *ds, const char *name,
984 const char *(*bit_to_string)(uint32_t), uint32_t flags,
985 uint32_t mask, uint32_t max_mask)
986 {
987 if (name) {
988 ds_put_format(ds, "%s%s=%s", colors.param, name, colors.end);
989 }
990
991 if (mask == max_mask) {
992 format_flags(ds, bit_to_string, flags, '|');
993 return;
994 }
995
996 if (!mask) {
997 ds_put_cstr(ds, "0/0");
998 return;
999 }
1000
1001 while (mask) {
1002 uint32_t bit = rightmost_1bit(mask);
1003 const char *s = bit_to_string(bit);
1004
1005 ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
1006 s ? s : "[Unknown]");
1007 mask &= ~bit;
1008 }
1009 }
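/* For example (illustrative): formatting flags == FLOW_TNL_F_KEY against
 * mask == (FLOW_TNL_F_KEY | FLOW_TNL_F_OAM) with flow_tun_flag_to_string()
 * emits "+key" for the bit that must be set and "-oam" for the bit that must
 * be clear; bits outside the mask are simply omitted. */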
1010
1011 /* Scans a string 's' of flags, using 'bit_to_string' to look up flag
1012  * names, to determine their numerical value, and returns the number of
1013  * characters parsed.  Scanning continues until the character 'end' is
1014  * reached.
1015 *
1016 * In the event of a failure, a negative error code will be returned. In
1017 * addition, if 'res_string' is non-NULL then a descriptive string will
1018 * be returned incorporating the identifying string 'field_name'. This
1019 * error string must be freed by the caller.
1020 *
1021 * Upon success, the flag values will be stored in 'res_flags' and
1022 * optionally 'res_mask', if it is non-NULL (if it is NULL then any masks
1023 * present in the original string will be considered an error). The
1024 * caller may restrict the acceptable set of values through the mask
1025 * 'allowed'. */
1026 int
1027 parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
1028 char end, const char *field_name, char **res_string,
1029 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask)
1030 {
1031 uint32_t result = 0;
1032 int n;
1033
1034 /* Parse masked flags in numeric format? */
1035 if (res_mask && ovs_scan(s, "%"SCNi32"/%"SCNi32"%n",
1036 res_flags, res_mask, &n) && n > 0) {
1037 if (*res_flags & ~allowed || *res_mask & ~allowed) {
1038 goto unknown;
1039 }
1040 return n;
1041 }
1042
1043 n = 0;
1044
1045 if (res_mask && (*s == '+' || *s == '-')) {
1046 uint32_t flags = 0, mask = 0;
1047
1048 /* Parse masked flags. */
1049 while (s[0] != end) {
1050 bool set;
1051 uint32_t bit;
1052 size_t len;
1053
1054 if (s[0] == '+') {
1055 set = true;
1056 } else if (s[0] == '-') {
1057 set = false;
1058 } else {
1059 if (res_string) {
1060 *res_string = xasprintf("%s: %s must be preceded by '+' "
1061 "(for SET) or '-' (NOT SET)", s,
1062 field_name);
1063 }
1064 return -EINVAL;
1065 }
1066 s++;
1067 n++;
1068
1069 for (bit = 1; bit; bit <<= 1) {
1070 const char *fname = bit_to_string(bit);
1071
1072 if (!fname) {
1073 continue;
1074 }
1075
1076 len = strlen(fname);
1077 if (strncmp(s, fname, len) ||
1078 (s[len] != '+' && s[len] != '-' && s[len] != end)) {
1079 continue;
1080 }
1081
1082 if (mask & bit) {
1083 /* bit already set. */
1084 if (res_string) {
1085 *res_string = xasprintf("%s: Each %s flag can be "
1086 "specified only once", s,
1087 field_name);
1088 }
1089 return -EINVAL;
1090 }
1091 if (!(bit & allowed)) {
1092 goto unknown;
1093 }
1094 if (set) {
1095 flags |= bit;
1096 }
1097 mask |= bit;
1098 break;
1099 }
1100
1101 if (!bit) {
1102 goto unknown;
1103 }
1104 s += len;
1105 n += len;
1106 }
1107
1108 *res_flags = flags;
1109 *res_mask = mask;
1110 return n;
1111 }
1112
1113 /* Parse unmasked flags. If a flag is present, it is set, otherwise
1114 * it is not set. */
1115 while (s[n] != end) {
1116 unsigned long long int flags;
1117 uint32_t bit;
1118 int n0;
1119
1120 if (ovs_scan(&s[n], "%lli%n", &flags, &n0)) {
1121 if (flags & ~allowed) {
1122 goto unknown;
1123 }
1124 n += n0 + (s[n + n0] == '|');
1125 result |= flags;
1126 continue;
1127 }
1128
1129 for (bit = 1; bit; bit <<= 1) {
1130 const char *name = bit_to_string(bit);
1131 size_t len;
1132
1133 if (!name) {
1134 continue;
1135 }
1136
1137 len = strlen(name);
1138 if (!strncmp(s + n, name, len) &&
1139 (s[n + len] == '|' || s[n + len] == end)) {
1140 if (!(bit & allowed)) {
1141 goto unknown;
1142 }
1143 result |= bit;
1144 n += len + (s[n + len] == '|');
1145 break;
1146 }
1147 }
1148
1149 if (!bit) {
1150 goto unknown;
1151 }
1152 }
1153
1154 *res_flags = result;
1155 if (res_mask) {
1156 *res_mask = UINT32_MAX;
1157 }
1158 if (res_string) {
1159 *res_string = NULL;
1160 }
1161 return n;
1162
1163 unknown:
1164 if (res_string) {
1165 *res_string = xasprintf("%s: unknown %s flag(s)", s, field_name);
1166 }
1167 return -EINVAL;
1168 }
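/* For example (illustrative): with 'bit_to_string' == ct_state_to_string,
 * end == ')' and a non-null 'res_mask', parsing "+trk+new)" yields
 * *res_flags == *res_mask == (CS_TRACKED | CS_NEW), assuming both bits are in
 * 'allowed'; parsing "trk|new)" instead sets the same bits in *res_flags and
 * leaves *res_mask exact (UINT32_MAX). */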
1169
1170 void
1171 flow_format(struct ds *ds, const struct flow *flow)
1172 {
1173 struct match match;
1174 struct flow_wildcards *wc = &match.wc;
1175
1176 match_wc_init(&match, flow);
1177
1178 /* As this function is most often used for formatting a packet in a
1179 * packet-in message, skip formatting the packet context fields that are
1180 * all-zeroes to make the print-out easier on the eyes. This means that a
1181 * missing context field implies a zero value for that field. This is
1182 * similar to OpenFlow encoding of these fields, as the specification
1183 * states that all-zeroes context fields should not be encoded in the
1184 * packet-in messages. */
1185 if (!flow->in_port.ofp_port) {
1186 WC_UNMASK_FIELD(wc, in_port);
1187 }
1188 if (!flow->skb_priority) {
1189 WC_UNMASK_FIELD(wc, skb_priority);
1190 }
1191 if (!flow->pkt_mark) {
1192 WC_UNMASK_FIELD(wc, pkt_mark);
1193 }
1194 if (!flow->recirc_id) {
1195 WC_UNMASK_FIELD(wc, recirc_id);
1196 }
1197 if (!flow->dp_hash) {
1198 WC_UNMASK_FIELD(wc, dp_hash);
1199 }
1200 if (!flow->ct_state) {
1201 WC_UNMASK_FIELD(wc, ct_state);
1202 }
1203 if (!flow->ct_zone) {
1204 WC_UNMASK_FIELD(wc, ct_zone);
1205 }
1206 if (!flow->ct_mark) {
1207 WC_UNMASK_FIELD(wc, ct_mark);
1208 }
1209 if (ovs_u128_is_zero(flow->ct_label)) {
1210 WC_UNMASK_FIELD(wc, ct_label);
1211 }
1212 for (int i = 0; i < FLOW_N_REGS; i++) {
1213 if (!flow->regs[i]) {
1214 WC_UNMASK_FIELD(wc, regs[i]);
1215 }
1216 }
1217 if (!flow->metadata) {
1218 WC_UNMASK_FIELD(wc, metadata);
1219 }
1220
1221 match_format(&match, ds, OFP_DEFAULT_PRIORITY);
1222 }
1223
1224 void
1225 flow_print(FILE *stream, const struct flow *flow)
1226 {
1227 char *s = flow_to_string(flow);
1228 fputs(s, stream);
1229 free(s);
1230 }
1231 \f
1232 /* flow_wildcards functions. */
1233
1234 /* Initializes 'wc' as a set of wildcards that matches every packet. */
1235 void
1236 flow_wildcards_init_catchall(struct flow_wildcards *wc)
1237 {
1238 memset(&wc->masks, 0, sizeof wc->masks);
1239 }
1240
1241 /* Converts a flow into flow wildcards. It sets the wildcard masks based on
1242 * the packet headers extracted to 'flow'. It will not set the mask for fields
1243 * that do not make sense for the packet type. OpenFlow-only metadata is
1244 * wildcarded, but other metadata is unconditionally exact-matched. */
1245 void flow_wildcards_init_for_packet(struct flow_wildcards *wc,
1246 const struct flow *flow)
1247 {
1248 memset(&wc->masks, 0x0, sizeof wc->masks);
1249
1250 /* Update this function whenever struct flow changes. */
1251 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
1252
1253 if (flow_tnl_dst_is_set(&flow->tunnel)) {
1254 if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
1255 WC_MASK_FIELD(wc, tunnel.tun_id);
1256 }
1257 WC_MASK_FIELD(wc, tunnel.ip_src);
1258 WC_MASK_FIELD(wc, tunnel.ip_dst);
1259 WC_MASK_FIELD(wc, tunnel.ipv6_src);
1260 WC_MASK_FIELD(wc, tunnel.ipv6_dst);
1261 WC_MASK_FIELD(wc, tunnel.flags);
1262 WC_MASK_FIELD(wc, tunnel.ip_tos);
1263 WC_MASK_FIELD(wc, tunnel.ip_ttl);
1264 WC_MASK_FIELD(wc, tunnel.tp_src);
1265 WC_MASK_FIELD(wc, tunnel.tp_dst);
1266 WC_MASK_FIELD(wc, tunnel.gbp_id);
1267 WC_MASK_FIELD(wc, tunnel.gbp_flags);
1268
1269 if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
1270 if (flow->tunnel.metadata.present.map) {
1271 wc->masks.tunnel.metadata.present.map =
1272 flow->tunnel.metadata.present.map;
1273 WC_MASK_FIELD(wc, tunnel.metadata.opts.u8);
1274 }
1275 } else {
1276 WC_MASK_FIELD(wc, tunnel.metadata.present.len);
1277 memset(wc->masks.tunnel.metadata.opts.gnv, 0xff,
1278 flow->tunnel.metadata.present.len);
1279 }
1280 } else if (flow->tunnel.tun_id) {
1281 WC_MASK_FIELD(wc, tunnel.tun_id);
1282 }
1283
1284 /* metadata, regs, and conj_id wildcarded. */
1285
1286 WC_MASK_FIELD(wc, skb_priority);
1287 WC_MASK_FIELD(wc, pkt_mark);
1288 WC_MASK_FIELD(wc, ct_state);
1289 WC_MASK_FIELD(wc, ct_zone);
1290 WC_MASK_FIELD(wc, ct_mark);
1291 WC_MASK_FIELD(wc, ct_label);
1292 WC_MASK_FIELD(wc, recirc_id);
1293 WC_MASK_FIELD(wc, dp_hash);
1294 WC_MASK_FIELD(wc, in_port);
1295
1296 /* actset_output wildcarded. */
1297
1298 WC_MASK_FIELD(wc, dl_dst);
1299 WC_MASK_FIELD(wc, dl_src);
1300 WC_MASK_FIELD(wc, dl_type);
1301 WC_MASK_FIELD(wc, vlan_tci);
1302
1303 if (flow->dl_type == htons(ETH_TYPE_IP)) {
1304 WC_MASK_FIELD(wc, nw_src);
1305 WC_MASK_FIELD(wc, nw_dst);
1306 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1307 WC_MASK_FIELD(wc, ipv6_src);
1308 WC_MASK_FIELD(wc, ipv6_dst);
1309 WC_MASK_FIELD(wc, ipv6_label);
1310 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
1311 flow->dl_type == htons(ETH_TYPE_RARP)) {
1312 WC_MASK_FIELD(wc, nw_src);
1313 WC_MASK_FIELD(wc, nw_dst);
1314 WC_MASK_FIELD(wc, nw_proto);
1315 WC_MASK_FIELD(wc, arp_sha);
1316 WC_MASK_FIELD(wc, arp_tha);
1317 return;
1318 } else if (eth_type_mpls(flow->dl_type)) {
1319 for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
1320 WC_MASK_FIELD(wc, mpls_lse[i]);
1321 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
1322 break;
1323 }
1324 }
1325 return;
1326 } else {
1327 return; /* Unknown ethertype. */
1328 }
1329
1330 /* IPv4 or IPv6. */
1331 WC_MASK_FIELD(wc, nw_frag);
1332 WC_MASK_FIELD(wc, nw_tos);
1333 WC_MASK_FIELD(wc, nw_ttl);
1334 WC_MASK_FIELD(wc, nw_proto);
1335
1336 /* No transport layer header in later fragments. */
1337 if (!(flow->nw_frag & FLOW_NW_FRAG_LATER) &&
1338 (flow->nw_proto == IPPROTO_ICMP ||
1339 flow->nw_proto == IPPROTO_ICMPV6 ||
1340 flow->nw_proto == IPPROTO_TCP ||
1341 flow->nw_proto == IPPROTO_UDP ||
1342 flow->nw_proto == IPPROTO_SCTP ||
1343 flow->nw_proto == IPPROTO_IGMP)) {
1344 WC_MASK_FIELD(wc, tp_src);
1345 WC_MASK_FIELD(wc, tp_dst);
1346
1347 if (flow->nw_proto == IPPROTO_TCP) {
1348 WC_MASK_FIELD(wc, tcp_flags);
1349 } else if (flow->nw_proto == IPPROTO_ICMPV6) {
1350 WC_MASK_FIELD(wc, arp_sha);
1351 WC_MASK_FIELD(wc, arp_tha);
1352 WC_MASK_FIELD(wc, nd_target);
1353 } else if (flow->nw_proto == IPPROTO_IGMP) {
1354 WC_MASK_FIELD(wc, igmp_group_ip4);
1355 }
1356 }
1357 }
1358
1359 /* Return a map of possible fields for a packet of the same type as 'flow'.
1360 * Including extra bits in the returned mask is not wrong, it is just less
1361 * optimal.
1362 *
1363 * This is a less precise version of flow_wildcards_init_for_packet() above. */
1364 void
1365 flow_wc_map(const struct flow *flow, struct flowmap *map)
1366 {
1367 /* Update this function whenever struct flow changes. */
1368 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
1369
1370 flowmap_init(map);
1371
1372 if (flow_tnl_dst_is_set(&flow->tunnel)) {
1373 FLOWMAP_SET__(map, tunnel, offsetof(struct flow_tnl, metadata));
1374 if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
1375 if (flow->tunnel.metadata.present.map) {
1376 FLOWMAP_SET(map, tunnel.metadata);
1377 }
1378 } else {
1379 FLOWMAP_SET(map, tunnel.metadata.present.len);
1380 FLOWMAP_SET__(map, tunnel.metadata.opts.gnv,
1381 flow->tunnel.metadata.present.len);
1382 }
1383 }
1384
1385 /* Metadata fields that can appear on packet input. */
1386 FLOWMAP_SET(map, skb_priority);
1387 FLOWMAP_SET(map, pkt_mark);
1388 FLOWMAP_SET(map, recirc_id);
1389 FLOWMAP_SET(map, dp_hash);
1390 FLOWMAP_SET(map, in_port);
1391 FLOWMAP_SET(map, dl_dst);
1392 FLOWMAP_SET(map, dl_src);
1393 FLOWMAP_SET(map, dl_type);
1394 FLOWMAP_SET(map, vlan_tci);
1395 FLOWMAP_SET(map, ct_state);
1396 FLOWMAP_SET(map, ct_zone);
1397 FLOWMAP_SET(map, ct_mark);
1398 FLOWMAP_SET(map, ct_label);
1399
1400 /* Ethertype-dependent fields. */
1401 if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
1402 FLOWMAP_SET(map, nw_src);
1403 FLOWMAP_SET(map, nw_dst);
1404 FLOWMAP_SET(map, nw_proto);
1405 FLOWMAP_SET(map, nw_frag);
1406 FLOWMAP_SET(map, nw_tos);
1407 FLOWMAP_SET(map, nw_ttl);
1408 FLOWMAP_SET(map, tp_src);
1409 FLOWMAP_SET(map, tp_dst);
1410
1411 if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
1412 FLOWMAP_SET(map, igmp_group_ip4);
1413 } else {
1414 FLOWMAP_SET(map, tcp_flags);
1415 }
1416 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1417 FLOWMAP_SET(map, ipv6_src);
1418 FLOWMAP_SET(map, ipv6_dst);
1419 FLOWMAP_SET(map, ipv6_label);
1420 FLOWMAP_SET(map, nw_proto);
1421 FLOWMAP_SET(map, nw_frag);
1422 FLOWMAP_SET(map, nw_tos);
1423 FLOWMAP_SET(map, nw_ttl);
1424 FLOWMAP_SET(map, tp_src);
1425 FLOWMAP_SET(map, tp_dst);
1426
1427 if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
1428 FLOWMAP_SET(map, nd_target);
1429 FLOWMAP_SET(map, arp_sha);
1430 FLOWMAP_SET(map, arp_tha);
1431 } else {
1432 FLOWMAP_SET(map, tcp_flags);
1433 }
1434 } else if (eth_type_mpls(flow->dl_type)) {
1435 FLOWMAP_SET(map, mpls_lse);
1436 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
1437 flow->dl_type == htons(ETH_TYPE_RARP)) {
1438 FLOWMAP_SET(map, nw_src);
1439 FLOWMAP_SET(map, nw_dst);
1440 FLOWMAP_SET(map, nw_proto);
1441 FLOWMAP_SET(map, arp_sha);
1442 FLOWMAP_SET(map, arp_tha);
1443 }
1444 }
1445
1446 /* Clear the metadata and register wildcard masks. They are not packet
1447 * header fields. */
1448 void
1449 flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
1450 {
1451 /* Update this function whenever struct flow changes. */
1452 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
1453
1454 memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
1455 memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
1456 wc->masks.actset_output = 0;
1457 wc->masks.conj_id = 0;
1458 }
1459
1460 /* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
1461 * fields. */
1462 bool
1463 flow_wildcards_is_catchall(const struct flow_wildcards *wc)
1464 {
1465 const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
1466 size_t i;
1467
1468 for (i = 0; i < FLOW_U64S; i++) {
1469 if (wc_u64[i]) {
1470 return false;
1471 }
1472 }
1473 return true;
1474 }
1475
1476 /* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
1477 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
1478 * in 'src1' or 'src2' or both. */
1479 void
1480 flow_wildcards_and(struct flow_wildcards *dst,
1481 const struct flow_wildcards *src1,
1482 const struct flow_wildcards *src2)
1483 {
1484 uint64_t *dst_u64 = (uint64_t *) &dst->masks;
1485 const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
1486 const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
1487 size_t i;
1488
1489 for (i = 0; i < FLOW_U64S; i++) {
1490 dst_u64[i] = src1_u64[i] & src2_u64[i];
1491 }
1492 }
1493
1494 /* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'.  That
1495  * is, a bit or a field is wildcarded in 'dst' only if it is wildcarded
1496  * in both 'src1' and 'src2'. */
1497 void
1498 flow_wildcards_or(struct flow_wildcards *dst,
1499 const struct flow_wildcards *src1,
1500 const struct flow_wildcards *src2)
1501 {
1502 uint64_t *dst_u64 = (uint64_t *) &dst->masks;
1503 const uint64_t *src1_u64 = (const uint64_t *) &src1->masks;
1504 const uint64_t *src2_u64 = (const uint64_t *) &src2->masks;
1505 size_t i;
1506
1507 for (i = 0; i < FLOW_U64S; i++) {
1508 dst_u64[i] = src1_u64[i] | src2_u64[i];
1509 }
1510 }
1511
1512 /* Returns a hash of the wildcards in 'wc'. */
1513 uint32_t
1514 flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
1515 {
1516 return flow_hash(&wc->masks, basis);
1517 }
1518
1519 /* Returns true if 'a' and 'b' represent the same wildcards, false if they are
1520 * different. */
1521 bool
1522 flow_wildcards_equal(const struct flow_wildcards *a,
1523 const struct flow_wildcards *b)
1524 {
1525 return flow_equal(&a->masks, &b->masks);
1526 }
1527
1528 /* Returns true if at least one bit or field is wildcarded in 'a' but not in
1529 * 'b', false otherwise. */
1530 bool
1531 flow_wildcards_has_extra(const struct flow_wildcards *a,
1532 const struct flow_wildcards *b)
1533 {
1534 const uint64_t *a_u64 = (const uint64_t *) &a->masks;
1535 const uint64_t *b_u64 = (const uint64_t *) &b->masks;
1536 size_t i;
1537
1538 for (i = 0; i < FLOW_U64S; i++) {
1539 if ((a_u64[i] & b_u64[i]) != b_u64[i]) {
1540 return true;
1541 }
1542 }
1543 return false;
1544 }
1545
1546 /* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
1547 * in 'wc' do not need to be equal in 'a' and 'b'. */
1548 bool
1549 flow_equal_except(const struct flow *a, const struct flow *b,
1550 const struct flow_wildcards *wc)
1551 {
1552 const uint64_t *a_u64 = (const uint64_t *) a;
1553 const uint64_t *b_u64 = (const uint64_t *) b;
1554 const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
1555 size_t i;
1556
1557 for (i = 0; i < FLOW_U64S; i++) {
1558 if ((a_u64[i] ^ b_u64[i]) & wc_u64[i]) {
1559 return false;
1560 }
1561 }
1562 return true;
1563 }
1564
1565 /* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
1566 * (A 0-bit indicates a wildcard bit.) */
1567 void
1568 flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
1569 {
1570 wc->masks.regs[idx] = mask;
1571 }
1572
1573 /* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
1574 * (A 0-bit indicates a wildcard bit.) */
1575 void
1576 flow_wildcards_set_xreg_mask(struct flow_wildcards *wc, int idx, uint64_t mask)
1577 {
1578 flow_set_xreg(&wc->masks, idx, mask);
1579 }
1580
1581 /* Calculates the 5-tuple hash from the given miniflow.
1582 * This returns the same value as flow_hash_5tuple for the corresponding
1583 * flow. */
1584 uint32_t
1585 miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis)
1586 {
1587 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
1588 uint32_t hash = basis;
1589
1590 if (flow) {
1591 ovs_be16 dl_type = MINIFLOW_GET_BE16(flow, dl_type);
1592 uint8_t nw_proto;
1593
1594 if (dl_type == htons(ETH_TYPE_IPV6)) {
1595 struct flowmap map = FLOWMAP_EMPTY_INITIALIZER;
1596 uint64_t value;
1597
1598 FLOWMAP_SET(&map, ipv6_src);
1599 FLOWMAP_SET(&map, ipv6_dst);
1600
1601 MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, map) {
1602 hash = hash_add64(hash, value);
1603 }
1604 } else if (dl_type == htons(ETH_TYPE_IP)
1605 || dl_type == htons(ETH_TYPE_ARP)) {
1606 hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_src));
1607 hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_dst));
1608 } else {
1609 goto out;
1610 }
1611
1612 nw_proto = MINIFLOW_GET_U8(flow, nw_proto);
1613 hash = hash_add(hash, nw_proto);
1614 if (nw_proto != IPPROTO_TCP && nw_proto != IPPROTO_UDP
1615 && nw_proto != IPPROTO_SCTP && nw_proto != IPPROTO_ICMP
1616 && nw_proto != IPPROTO_ICMPV6) {
1617 goto out;
1618 }
1619
1620 /* Add both ports at once. */
1621 hash = hash_add(hash, MINIFLOW_GET_U32(flow, tp_src));
1622 }
1623 out:
1624 return hash_finish(hash, 42);
1625 }
1626
1627 ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
1628 ASSERT_SEQUENTIAL(ipv6_src, ipv6_dst);
1629
1630 /* Calculates the 5-tuple hash from the given flow. */
1631 uint32_t
1632 flow_hash_5tuple(const struct flow *flow, uint32_t basis)
1633 {
1634 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
1635 uint32_t hash = basis;
1636
1637 if (flow) {
1638
1639 if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1640 const uint64_t *flow_u64 = (const uint64_t *)flow;
1641 int ofs = offsetof(struct flow, ipv6_src) / 8;
1642 int end = ofs + 2 * sizeof flow->ipv6_src / 8;
1643
1644 for (;ofs < end; ofs++) {
1645 hash = hash_add64(hash, flow_u64[ofs]);
1646 }
1647 } else if (flow->dl_type == htons(ETH_TYPE_IP)
1648 || flow->dl_type == htons(ETH_TYPE_ARP)) {
1649 hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
1650 hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
1651 } else {
1652 goto out;
1653 }
1654
1655 hash = hash_add(hash, flow->nw_proto);
1656 if (flow->nw_proto != IPPROTO_TCP && flow->nw_proto != IPPROTO_UDP
1657 && flow->nw_proto != IPPROTO_SCTP && flow->nw_proto != IPPROTO_ICMP
1658 && flow->nw_proto != IPPROTO_ICMPV6) {
1659 goto out;
1660 }
1661
1662 /* Add both ports at once. */
1663 hash = hash_add(hash,
1664 ((const uint32_t *)flow)[offsetof(struct flow, tp_src)
1665 / sizeof(uint32_t)]);
1666 }
1667 out:
1668 return hash_finish(hash, 42); /* Arbitrary number. */
1669 }
1670
1671 /* Hashes 'flow' based on its L2 through L4 protocol information. */
1672 uint32_t
1673 flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
1674 {
1675 struct {
1676 union {
1677 ovs_be32 ipv4_addr;
1678 struct in6_addr ipv6_addr;
1679 };
1680 ovs_be16 eth_type;
1681 ovs_be16 vlan_tci;
1682 ovs_be16 tp_port;
1683 struct eth_addr eth_addr;
1684 uint8_t ip_proto;
1685 } fields;
1686
1687 int i;
1688
1689 memset(&fields, 0, sizeof fields);
1690 for (i = 0; i < ARRAY_SIZE(fields.eth_addr.be16); i++) {
1691 fields.eth_addr.be16[i] = flow->dl_src.be16[i] ^ flow->dl_dst.be16[i];
1692 }
1693 fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
1694 fields.eth_type = flow->dl_type;
1695
1696 /* UDP source and destination port are not taken into account because they
1697 * will not necessarily be symmetric in a bidirectional flow. */
1698 if (fields.eth_type == htons(ETH_TYPE_IP)) {
1699 fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
1700 fields.ip_proto = flow->nw_proto;
1701 if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
1702 fields.tp_port = flow->tp_src ^ flow->tp_dst;
1703 }
1704 } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
1705 const uint8_t *a = &flow->ipv6_src.s6_addr[0];
1706 const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
1707 uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];
1708
1709 for (i=0; i<16; i++) {
1710 ipv6_addr[i] = a[i] ^ b[i];
1711 }
1712 fields.ip_proto = flow->nw_proto;
1713 if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
1714 fields.tp_port = flow->tp_src ^ flow->tp_dst;
1715 }
1716 }
1717 return jhash_bytes(&fields, sizeof fields, basis);
1718 }
1719
1720 /* Hashes 'flow' based on its L3 through L4 protocol information. */
1721 uint32_t
1722 flow_hash_symmetric_l3l4(const struct flow *flow, uint32_t basis,
1723 bool inc_udp_ports)
1724 {
1725 uint32_t hash = basis;
1726
1727     /* UDP source and destination ports are hashed only if 'inc_udp_ports' is true. */
1728 if (flow->dl_type == htons(ETH_TYPE_IP)) {
1729 hash = hash_add(hash,
1730 (OVS_FORCE uint32_t) (flow->nw_src ^ flow->nw_dst));
1731 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1732 /* IPv6 addresses are 64-bit aligned inside struct flow. */
1733 const uint64_t *a = ALIGNED_CAST(uint64_t *, flow->ipv6_src.s6_addr);
1734 const uint64_t *b = ALIGNED_CAST(uint64_t *, flow->ipv6_dst.s6_addr);
1735
1736 for (int i = 0; i < 4; i++) {
1737 hash = hash_add64(hash, a[i] ^ b[i]);
1738 }
1739 } else {
1740 /* Cannot hash non-IP flows */
1741 return 0;
1742 }
1743
1744 hash = hash_add(hash, flow->nw_proto);
1745 if (flow->nw_proto == IPPROTO_TCP || flow->nw_proto == IPPROTO_SCTP ||
1746 (inc_udp_ports && flow->nw_proto == IPPROTO_UDP)) {
1747 hash = hash_add(hash,
1748 (OVS_FORCE uint16_t) (flow->tp_src ^ flow->tp_dst));
1749 }
1750
1751 return hash_finish(hash, basis);
1752 }
1753
1754 /* Initialize a flow with random fields that matter for nx_hash_fields. */
1755 void
1756 flow_random_hash_fields(struct flow *flow)
1757 {
1758 uint16_t rnd = random_uint16();
1759
1760 /* Initialize to all zeros. */
1761 memset(flow, 0, sizeof *flow);
1762
1763 eth_addr_random(&flow->dl_src);
1764 eth_addr_random(&flow->dl_dst);
1765
1766 flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);
1767
1768     /* Make most of the random flows IPv4, some IPv6, and the rest random. */
1769 flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
1770 rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;
1771
1772 if (dl_type_is_ip_any(flow->dl_type)) {
1773 if (flow->dl_type == htons(ETH_TYPE_IP)) {
1774 flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
1775 flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
1776 } else {
1777 random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
1778 random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
1779 }
1780         /* Make most IP flows TCP, some UDP or SCTP, and the rest random. */
1781 rnd = random_uint16();
1782 flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
1783 rnd < 0xc000 ? IPPROTO_UDP :
1784 rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
1785 if (flow->nw_proto == IPPROTO_TCP ||
1786 flow->nw_proto == IPPROTO_UDP ||
1787 flow->nw_proto == IPPROTO_SCTP) {
1788 flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
1789 flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
1790 }
1791 }
1792 }
1793
1794 /* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
1795 void
1796 flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
1797 enum nx_hash_fields fields)
1798 {
1799 switch (fields) {
1800 case NX_HASH_FIELDS_ETH_SRC:
1801 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
1802 break;
1803
1804 case NX_HASH_FIELDS_SYMMETRIC_L4:
1805 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
1806 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
1807 if (flow->dl_type == htons(ETH_TYPE_IP)) {
1808 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
1809 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
1810 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1811 memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
1812 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
1813 }
1814 if (is_ip_any(flow)) {
1815 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
1816 flow_unwildcard_tp_ports(flow, wc);
1817 }
1818 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
1819 break;
1820
1821 case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP:
1822 if (is_ip_any(flow) && flow->nw_proto == IPPROTO_UDP) {
1823 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
1824 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
1825 }
1826 /* no break */
1827 case NX_HASH_FIELDS_SYMMETRIC_L3L4:
1828 if (flow->dl_type == htons(ETH_TYPE_IP)) {
1829 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
1830 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
1831 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1832 memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
1833 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
1834 } else {
1835 break; /* non-IP flow */
1836 }
1837
1838 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
1839 if (flow->nw_proto == IPPROTO_TCP || flow->nw_proto == IPPROTO_SCTP) {
1840 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
1841 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
1842 }
1843 break;
1844
1845 default:
1846 OVS_NOT_REACHED();
1847 }
1848 }
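
/* Hedged usage sketch: a caller that hashes on one of these field sets
 * normally also unwildcards the same fields in its flow_wildcards so that the
 * resulting megaflow stays correct (flow_wildcards_init_catchall() being the
 * usual starting point):
 *
 *     struct flow_wildcards wc;
 *     flow_wildcards_init_catchall(&wc);
 *     flow_mask_hash_fields(&flow, &wc, NX_HASH_FIELDS_SYMMETRIC_L4);
 */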
1849
1850 /* Hashes the portions of 'flow' designated by 'fields'. */
1851 uint32_t
1852 flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
1853 uint16_t basis)
1854 {
1855 switch (fields) {
1856
1857 case NX_HASH_FIELDS_ETH_SRC:
1858 return jhash_bytes(&flow->dl_src, sizeof flow->dl_src, basis);
1859
1860 case NX_HASH_FIELDS_SYMMETRIC_L4:
1861 return flow_hash_symmetric_l4(flow, basis);
1862
1863 case NX_HASH_FIELDS_SYMMETRIC_L3L4:
1864 return flow_hash_symmetric_l3l4(flow, basis, false);
1865
1866 case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP:
1867 return flow_hash_symmetric_l3l4(flow, basis, true);
1868
1869 }
1870
1871 OVS_NOT_REACHED();
1872 }
1873
1874 /* Returns a string representation of 'fields'. */
1875 const char *
1876 flow_hash_fields_to_str(enum nx_hash_fields fields)
1877 {
1878 switch (fields) {
1879 case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
1880 case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
1881 case NX_HASH_FIELDS_SYMMETRIC_L3L4: return "symmetric_l3l4";
1882 case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP: return "symmetric_l3l4+udp";
1883 default: return "<unknown>";
1884 }
1885 }
1886
1887 /* Returns true if the value of 'fields' is supported, false otherwise. */
1888 bool
1889 flow_hash_fields_valid(enum nx_hash_fields fields)
1890 {
1891 return fields == NX_HASH_FIELDS_ETH_SRC
1892 || fields == NX_HASH_FIELDS_SYMMETRIC_L4
1893 || fields == NX_HASH_FIELDS_SYMMETRIC_L3L4
1894 || fields == NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP;
1895 }
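
/* Sketch of typical use (variables and error value chosen only for
 * illustration): validate a controller-supplied 'fields' before hashing
 * with it:
 *
 *     if (!flow_hash_fields_valid(fields)) {
 *         return OFPERR_OFPBAC_BAD_ARGUMENT;
 *     }
 *     hash = flow_hash_fields(&flow, fields, basis);
 */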
1896
1897 /* Returns a hash value for the bits of 'flow' that are active based on
1898 * 'wc', given 'basis'. */
1899 uint32_t
1900 flow_hash_in_wildcards(const struct flow *flow,
1901 const struct flow_wildcards *wc, uint32_t basis)
1902 {
1903 const uint64_t *wc_u64 = (const uint64_t *) &wc->masks;
1904 const uint64_t *flow_u64 = (const uint64_t *) flow;
1905 uint32_t hash;
1906 size_t i;
1907
1908 hash = basis;
1909 for (i = 0; i < FLOW_U64S; i++) {
1910 hash = hash_add64(hash, flow_u64[i] & wc_u64[i]);
1911 }
1912 return hash_finish(hash, 8 * FLOW_U64S);
1913 }
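
/* Note: the loop above hashes "flow_u64[i] & wc_u64[i]" for every 64-bit
 * word, which is equivalent to hashing a copy of 'flow' in which every
 * wildcarded bit has been zeroed, without actually making the copy. */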
1914
1915 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1916 * OpenFlow 1.0 "dl_vlan" value:
1917 *
1918 * - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
1919 * that VLAN. Any existing PCP match is unchanged (it becomes 0 if
1920 * 'flow' previously matched packets without a VLAN header).
1921 *
1922 * - If it is OFP_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
1923 * without a VLAN tag.
1924 *
1925 * - Other values of 'vid' should not be used. */
1926 void
1927 flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
1928 {
1929 if (vid == htons(OFP10_VLAN_NONE)) {
1930 flow->vlan_tci = htons(0);
1931 } else {
1932 vid &= htons(VLAN_VID_MASK);
1933 flow->vlan_tci &= ~htons(VLAN_VID_MASK);
1934 flow->vlan_tci |= htons(VLAN_CFI) | vid;
1935 }
1936 }
1937
1938 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1939 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
1940 * plus CFI). */
1941 void
1942 flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
1943 {
1944 ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
1945 flow->vlan_tci &= ~mask;
1946 flow->vlan_tci |= vid & mask;
1947 }
1948
1949 /* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
1950 * range 0...7.
1951 *
1952 * This function has no effect on the VLAN ID that 'flow' matches.
1953 *
1954 * After calling this function, 'flow' will not match packets without a VLAN
1955 * header. */
1956 void
1957 flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
1958 {
1959 pcp &= 0x07;
1960 flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
1961 flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
1962 }
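
/* Hedged sketch for the VLAN setters above, tagging a previously untagged
 * flow with VID 10 and PCP 5 (values chosen only for illustration):
 *
 *     flow_set_dl_vlan(&flow, htons(10));   // sets the VID and CFI bits
 *     flow_set_vlan_pcp(&flow, 5);          // sets the PCP, keeps the VID
 *     // flow.vlan_tci is now htons(VLAN_CFI | (5 << VLAN_PCP_SHIFT) | 10)
 */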
1963
1964 /* Returns the number of MPLS LSEs present in 'flow'.
1965 *
1966 * Returns 0 if the 'dl_type' of 'flow' is not an MPLS ethernet type.
1967 * Otherwise traverses 'flow''s MPLS label stack stopping at the
1968 * first entry that has the BoS bit set. If no such entry exists then
1969 * the maximum number of LSEs that can be stored in 'flow' is returned.
1970 */
1971 int
1972 flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
1973 {
1974 /* dl_type is always masked. */
1975 if (eth_type_mpls(flow->dl_type)) {
1976 int i;
1977 int cnt;
1978
1979 cnt = 0;
1980 for (i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
1981 if (wc) {
1982 wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
1983 }
1984 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
1985 return i + 1;
1986 }
1987 if (flow->mpls_lse[i]) {
1988 cnt++;
1989 }
1990 }
1991 return cnt;
1992 } else {
1993 return 0;
1994 }
1995 }
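
/* Note on the 'wc' side effect above: the BoS bit of every LSE examined is
 * unwildcarded, so a megaflow derived from this lookup keeps matching only
 * packets whose MPLS label stack has the same depth. */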
1996
1997 /* Returns the number of consecutive MPLS LSEs, starting at the
1998  * innermost LSE, that are common to 'a' and 'b'.
1999 *
2000 * 'an' must be flow_count_mpls_labels(a).
2001 * 'bn' must be flow_count_mpls_labels(b).
2002 */
2003 int
2004 flow_count_common_mpls_labels(const struct flow *a, int an,
2005 const struct flow *b, int bn,
2006 struct flow_wildcards *wc)
2007 {
2008 int min_n = MIN(an, bn);
2009 if (min_n == 0) {
2010 return 0;
2011 } else {
2012 int common_n = 0;
2013 int a_last = an - 1;
2014 int b_last = bn - 1;
2015 int i;
2016
2017 for (i = 0; i < min_n; i++) {
2018 if (wc) {
2019 wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
2020 wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
2021 }
2022 if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
2023 break;
2024 } else {
2025 common_n++;
2026 }
2027 }
2028
2029 return common_n;
2030 }
2031 }
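
/* Worked example (labels invented for illustration): with a = [X, Y, Z(BoS)]
 * so that an == 3, and b = [Y, Z(BoS)] so that bn == 2, the loop compares the
 * stacks starting from the innermost LSE and returns 2 when a's two innermost
 * LSEs are identical to b's. */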
2032
2033 /* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
2034 * to 'mpls_eth_type', which must be an MPLS Ethertype.
2035 *
2036  * If the new label is the first MPLS label in 'flow', it is generated as:
2037 *
2038 * - label: 2, if 'flow' is IPv6, otherwise 0.
2039 *
2040 * - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
2041 *
2042 * - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
2043 *
2044 * - BoS: 1.
2045 *
2046  * If the new label is the second or a later MPLS label in 'flow', it is
2047  * generated as:
2048 *
2049 * - label: Copied from outer label.
2050 *
2051 * - TTL: Copied from outer label.
2052 *
2053 * - TC: Copied from outer label.
2054 *
2055 * - BoS: 0.
2056 *
2057 * 'n' must be flow_count_mpls_labels(flow). 'n' must be less than
2058 * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow).
2059 */
2060 void
2061 flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
2062 struct flow_wildcards *wc)
2063 {
2064 ovs_assert(eth_type_mpls(mpls_eth_type));
2065 ovs_assert(n < FLOW_MAX_MPLS_LABELS);
2066
2067 if (n) {
2068 int i;
2069
2070 if (wc) {
2071 memset(&wc->masks.mpls_lse, 0xff, sizeof *wc->masks.mpls_lse * n);
2072 }
2073 for (i = n; i >= 1; i--) {
2074 flow->mpls_lse[i] = flow->mpls_lse[i - 1];
2075 }
2076 flow->mpls_lse[0] = (flow->mpls_lse[1] & htonl(~MPLS_BOS_MASK));
2077 } else {
2078 int label = 0; /* IPv4 Explicit Null. */
2079 int tc = 0;
2080 int ttl = 64;
2081
2082 if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2083 label = 2;
2084 }
2085
2086 if (is_ip_any(flow)) {
2087 tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
2088 if (wc) {
2089 wc->masks.nw_tos |= IP_DSCP_MASK;
2090 wc->masks.nw_ttl = 0xff;
2091 }
2092
2093 if (flow->nw_ttl) {
2094 ttl = flow->nw_ttl;
2095 }
2096 }
2097
2098 flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
2099
2100 /* Clear all L3 and L4 fields and dp_hash. */
2101 BUILD_ASSERT(FLOW_WC_SEQ == 35);
2102 memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
2103 sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
2104 flow->dp_hash = 0;
2105 }
2106 flow->dl_type = mpls_eth_type;
2107 }
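
/* Hedged sketch of pushing a first label onto an IPv4 flow: the new LSE gets
 * label 0 (IPv4 Explicit NULL), a TTL taken from the IP TTL (64 if that is
 * zero), a TC taken from the DSCP bits, and BoS = 1, and the L3/L4 fields of
 * 'flow' are then cleared:
 *
 *     int n = flow_count_mpls_labels(&flow, NULL);
 *     flow_push_mpls(&flow, n, htons(ETH_TYPE_MPLS), NULL);
 */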
2108
2109 /* Tries to remove the outermost MPLS label from 'flow'. Returns true if
2110 * successful, false otherwise. On success, sets 'flow''s Ethernet type to
2111 * 'eth_type'.
2112 *
2113 * 'n' must be flow_count_mpls_labels(flow). */
2114 bool
2115 flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
2116 struct flow_wildcards *wc)
2117 {
2118 int i;
2119
2120 if (n == 0) {
2121 /* Nothing to pop. */
2122 return false;
2123 } else if (n == FLOW_MAX_MPLS_LABELS) {
2124 if (wc) {
2125 wc->masks.mpls_lse[n - 1] |= htonl(MPLS_BOS_MASK);
2126 }
2127 if (!(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
2128 /* Can't pop because don't know what to fill in mpls_lse[n - 1]. */
2129 return false;
2130 }
2131 }
2132
2133 if (wc) {
2134 memset(&wc->masks.mpls_lse[1], 0xff,
2135 sizeof *wc->masks.mpls_lse * (n - 1));
2136 }
2137 for (i = 1; i < n; i++) {
2138 flow->mpls_lse[i - 1] = flow->mpls_lse[i];
2139 }
2140 flow->mpls_lse[n - 1] = 0;
2141 flow->dl_type = eth_type;
2142 return true;
2143 }
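
/* Hedged companion sketch: popping the only label and restoring an IPv4
 * Ethertype.  Only 'flow' is rewritten; the L3/L4 fields that
 * flow_push_mpls() cleared are not recovered:
 *
 *     flow_pop_mpls(&flow, flow_count_mpls_labels(&flow, NULL),
 *                   htons(ETH_TYPE_IP), NULL);
 */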
2144
2145 /* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
2146 * as an OpenFlow 1.1 "mpls_label" value. */
2147 void
2148 flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
2149 {
2150 set_mpls_lse_label(&flow->mpls_lse[idx], label);
2151 }
2152
2153 /* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
2154 * range 0...255. */
2155 void
2156 flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
2157 {
2158 set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
2159 }
2160
2161 /* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
2162 * range 0...7. */
2163 void
2164 flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
2165 {
2166 set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
2167 }
2168
2169 /* Sets the MPLS BoS bit that 'flow' matches to 'bos', which should be 0 or 1. */
2170 void
2171 flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
2172 {
2173 set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
2174 }
2175
2176 /* Sets the entire MPLS LSE. */
2177 void
2178 flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
2179 {
2180 flow->mpls_lse[idx] = lse;
2181 }
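
/* Usage sketch for the per-LSE setters above (index and values chosen only
 * for illustration):
 *
 *     flow_set_mpls_ttl(&flow, 0, 64);   // outermost LSE, TTL 64
 *     flow_set_mpls_tc(&flow, 0, 0);
 *     flow_set_mpls_bos(&flow, 0, 1);    // mark the outermost LSE as BoS
 */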
2182
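/* Appends a zeroed-out L4 header (TCP, UDP, SCTP, ICMP, IGMP, or ICMPv6) that
 * matches 'flow' to 'p', for use by flow_compose() below.  Returns the number
 * of bytes appended, which is 0 for later fragments and for protocols that
 * are not handled here. */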
2183 static size_t
2184 flow_compose_l4(struct dp_packet *p, const struct flow *flow)
2185 {
2186 size_t l4_len = 0;
2187
2188 if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
2189 || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
2190 if (flow->nw_proto == IPPROTO_TCP) {
2191 struct tcp_header *tcp;
2192
2193 l4_len = sizeof *tcp;
2194 tcp = dp_packet_put_zeros(p, l4_len);
2195 tcp->tcp_src = flow->tp_src;
2196 tcp->tcp_dst = flow->tp_dst;
2197 tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
2198 } else if (flow->nw_proto == IPPROTO_UDP) {
2199 struct udp_header *udp;
2200
2201 l4_len = sizeof *udp;
2202 udp = dp_packet_put_zeros(p, l4_len);
2203 udp->udp_src = flow->tp_src;
2204 udp->udp_dst = flow->tp_dst;
2205 } else if (flow->nw_proto == IPPROTO_SCTP) {
2206 struct sctp_header *sctp;
2207
2208 l4_len = sizeof *sctp;
2209 sctp = dp_packet_put_zeros(p, l4_len);
2210 sctp->sctp_src = flow->tp_src;
2211 sctp->sctp_dst = flow->tp_dst;
2212 } else if (flow->nw_proto == IPPROTO_ICMP) {
2213 struct icmp_header *icmp;
2214
2215 l4_len = sizeof *icmp;
2216 icmp = dp_packet_put_zeros(p, l4_len);
2217 icmp->icmp_type = ntohs(flow->tp_src);
2218 icmp->icmp_code = ntohs(flow->tp_dst);
2219 icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
2220 } else if (flow->nw_proto == IPPROTO_IGMP) {
2221 struct igmp_header *igmp;
2222
2223 l4_len = sizeof *igmp;
2224 igmp = dp_packet_put_zeros(p, l4_len);
2225 igmp->igmp_type = ntohs(flow->tp_src);
2226 igmp->igmp_code = ntohs(flow->tp_dst);
2227 put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
2228 igmp->igmp_csum = csum(igmp, IGMP_HEADER_LEN);
2229 } else if (flow->nw_proto == IPPROTO_ICMPV6) {
2230 struct icmp6_hdr *icmp;
2231
2232 l4_len = sizeof *icmp;
2233 icmp = dp_packet_put_zeros(p, l4_len);
2234 icmp->icmp6_type = ntohs(flow->tp_src);
2235 icmp->icmp6_code = ntohs(flow->tp_dst);
2236
2237 if (icmp->icmp6_code == 0 &&
2238 (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
2239 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
2240 struct in6_addr *nd_target;
2241 struct ovs_nd_opt *nd_opt;
2242
2243 l4_len += sizeof *nd_target;
2244 nd_target = dp_packet_put_zeros(p, sizeof *nd_target);
2245 *nd_target = flow->nd_target;
2246
2247 if (!eth_addr_is_zero(flow->arp_sha)) {
2248 l4_len += 8;
2249 nd_opt = dp_packet_put_zeros(p, 8);
2250 nd_opt->nd_opt_len = 1;
2251 nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
2252 nd_opt->nd_opt_mac = flow->arp_sha;
2253 }
2254 if (!eth_addr_is_zero(flow->arp_tha)) {
2255 l4_len += 8;
2256 nd_opt = dp_packet_put_zeros(p, 8);
2257 nd_opt->nd_opt_len = 1;
2258 nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
2259 nd_opt->nd_opt_mac = flow->arp_tha;
2260 }
2261 }
2262 icmp->icmp6_cksum = (OVS_FORCE uint16_t)
2263 csum(icmp, (char *)dp_packet_tail(p) - (char *)icmp);
2264 }
2265 }
2266 return l4_len;
2267 }
2268
2269 /* Puts into 'b' a packet that flow_extract() would parse as having the given
2270 * 'flow'.
2271 *
2272 * (This is useful only for testing, obviously, and the packet isn't really
2273  * valid: some checksums are left unfilled, for one, and lots of fields are
2274  * just zeroed.) */
2275 void
2276 flow_compose(struct dp_packet *p, const struct flow *flow)
2277 {
2278 size_t l4_len;
2279
2280 /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
2281 eth_compose(p, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
2282 if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
2283 struct eth_header *eth = dp_packet_l2(p);
2284 eth->eth_type = htons(dp_packet_size(p));
2285 return;
2286 }
2287
2288 if (flow->vlan_tci & htons(VLAN_CFI)) {
2289 eth_push_vlan(p, htons(ETH_TYPE_VLAN), flow->vlan_tci);
2290 }
2291
2292 if (flow->dl_type == htons(ETH_TYPE_IP)) {
2293 struct ip_header *ip;
2294
2295 ip = dp_packet_put_zeros(p, sizeof *ip);
2296 ip->ip_ihl_ver = IP_IHL_VER(5, 4);
2297 ip->ip_tos = flow->nw_tos;
2298 ip->ip_ttl = flow->nw_ttl;
2299 ip->ip_proto = flow->nw_proto;
2300 put_16aligned_be32(&ip->ip_src, flow->nw_src);
2301 put_16aligned_be32(&ip->ip_dst, flow->nw_dst);
2302
2303 if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
2304 ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
2305 if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
2306 ip->ip_frag_off |= htons(100);
2307 }
2308 }
2309
2310 dp_packet_set_l4(p, dp_packet_tail(p));
2311
2312 l4_len = flow_compose_l4(p, flow);
2313
2314 ip = dp_packet_l3(p);
2315 ip->ip_tot_len = htons(p->l4_ofs - p->l3_ofs + l4_len);
2316 ip->ip_csum = csum(ip, sizeof *ip);
2317 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
2318 struct ovs_16aligned_ip6_hdr *nh;
2319
2320 nh = dp_packet_put_zeros(p, sizeof *nh);
2321 put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
2322 htonl(flow->nw_tos << 20) | flow->ipv6_label);
2323 nh->ip6_hlim = flow->nw_ttl;
2324 nh->ip6_nxt = flow->nw_proto;
2325
2326 memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
2327 memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));
2328
2329 dp_packet_set_l4(p, dp_packet_tail(p));
2330
2331 l4_len = flow_compose_l4(p, flow);
2332
2333 nh = dp_packet_l3(p);
2334 nh->ip6_plen = htons(l4_len);
2335 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
2336 flow->dl_type == htons(ETH_TYPE_RARP)) {
2337 struct arp_eth_header *arp;
2338
2339 arp = dp_packet_put_zeros(p, sizeof *arp);
2340 dp_packet_set_l3(p, arp);
2341 arp->ar_hrd = htons(1);
2342 arp->ar_pro = htons(ETH_TYPE_IP);
2343 arp->ar_hln = ETH_ADDR_LEN;
2344 arp->ar_pln = 4;
2345 arp->ar_op = htons(flow->nw_proto);
2346
2347 if (flow->nw_proto == ARP_OP_REQUEST ||
2348 flow->nw_proto == ARP_OP_REPLY) {
2349 put_16aligned_be32(&arp->ar_spa, flow->nw_src);
2350 put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
2351 arp->ar_sha = flow->arp_sha;
2352 arp->ar_tha = flow->arp_tha;
2353 }
2354 }
2355
2356 if (eth_type_mpls(flow->dl_type)) {
2357 int n;
2358
2359 p->l2_5_ofs = p->l3_ofs;
2360 for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
2361 if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
2362 break;
2363 }
2364 }
2365 while (n > 0) {
2366 push_mpls(p, flow->dl_type, flow->mpls_lse[--n]);
2367 }
2368 }
2369 }
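
/* Hedged usage sketch, roughly as a unit test might do it ('flow' assumed to
 * be supplied by the caller): compose a packet from 'flow' and check that
 * flow_extract() parses it back:
 *
 *     struct dp_packet p;
 *     struct flow extracted;
 *
 *     dp_packet_init(&p, 0);
 *     flow_compose(&p, &flow);
 *     flow_extract(&p, &extracted);
 *     dp_packet_uninit(&p);
 */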
2370 \f
2371 /* Compressed flow. */
2372
2373 /* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
2374 * the caller. The caller must have already computed 'dst->map' properly to
2375 * indicate the significant uint64_t elements of 'src'.
2376 *
2377 * Normally the significant elements are the ones that are non-zero. However,
2378 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
2379 * so that the flow and mask always have the same maps. */
2380 void
2381 miniflow_init(struct miniflow *dst, const struct flow *src)
2382 {
2383 uint64_t *dst_u64 = miniflow_values(dst);
2384 size_t idx;
2385
2386 FLOWMAP_FOR_EACH_INDEX(idx, dst->map) {
2387 *dst_u64++ = flow_u64_value(src, idx);
2388 }
2389 }
2390
2391 /* Initialize the maps of 'flow' from 'src'. */
2392 void
2393 miniflow_map_init(struct miniflow *flow, const struct flow *src)
2394 {
2395 /* Initialize map, counting the number of nonzero elements. */
2396 flowmap_init(&flow->map);
2397 for (size_t i = 0; i < FLOW_U64S; i++) {
2398 if (flow_u64_value(src, i)) {
2399 flowmap_set(&flow->map, i, 1);
2400 }
2401 }
2402 }
2403
2404 /* Allocates 'n' miniflows, consecutive in memory, initializing the map of
2405  * each from 'src'.
2406 * Returns the size of the miniflow data. */
2407 size_t
2408 miniflow_alloc(struct miniflow *dsts[], size_t n, const struct miniflow *src)
2409 {
2410 size_t n_values = miniflow_n_values(src);
2411 size_t data_size = MINIFLOW_VALUES_SIZE(n_values);
2412 struct miniflow *dst = xmalloc(n * (sizeof *src + data_size));
2413 size_t i;
2414
2415 COVERAGE_INC(miniflow_malloc);
2416
2417 for (i = 0; i < n; i++) {
2418 *dst = *src; /* Copy maps. */
2419 dsts[i] = dst;
2420 dst += 1; /* Just past the maps. */
2421 dst = (struct miniflow *)((uint64_t *)dst + n_values); /* Skip data. */
2422 }
2423 return data_size;
2424 }
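
/* Note on the layout built above: each of the 'n' miniflows is immediately
 * followed by room for 'n_values' uint64_t data values, so consecutive
 * miniflows are sizeof(struct miniflow) + data_size bytes apart. */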
2425
2426 /* Returns a miniflow copy of 'src'. The caller must eventually free() the
2427 * returned miniflow. */
2428 struct miniflow *
2429 miniflow_create(const struct flow *src)
2430 {
2431 struct miniflow tmp;
2432 struct miniflow *dst;
2433
2434 miniflow_map_init(&tmp, src);
2435
2436 miniflow_alloc(&dst, 1, &tmp);
2437 miniflow_init(dst, src);
2438 return dst;
2439 }
2440
2441 /* Initializes 'dst' as a copy of 'src'. The caller must have allocated
2442 * 'dst' to have inline space for 'n_values' data in 'src'. */
2443 void
2444 miniflow_clone(struct miniflow *dst, const struct miniflow *src,
2445 size_t n_values)
2446 {
2447 *dst = *src; /* Copy maps. */
2448 memcpy(miniflow_values(dst), miniflow_get_values(src),
2449 MINIFLOW_VALUES_SIZE(n_values));
2450 }
2451
2452 /* Initializes 'dst' as a copy of 'src'. */
2453 void
2454 miniflow_expand(const struct miniflow *src, struct flow *dst)
2455 {
2456 memset(dst, 0, sizeof *dst);
2457 flow_union_with_miniflow(dst, src);
2458 }
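
/* Hedged round-trip sketch for the compressed representation: afterwards,
 * 'expanded' should compare equal to the original 'flow':
 *
 *     struct miniflow *mf = miniflow_create(&flow);
 *     struct flow expanded;
 *
 *     miniflow_expand(mf, &expanded);
 *     free(mf);
 */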
2459
2460 /* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
2461 bool
2462 miniflow_equal(const struct miniflow *a, const struct miniflow *b)
2463 {
2464 const uint64_t *ap = miniflow_get_values(a);
2465 const uint64_t *bp = miniflow_get_values(b);
2466
2467 /* This is mostly called after a matching hash, so it is highly likely that
2468 * the maps are equal as well. */
2469 if (OVS_LIKELY(flowmap_equal(a->map, b->map))) {
2470 return !memcmp(ap, bp, miniflow_n_values(a) * sizeof *ap);
2471 } else {
2472 size_t idx;
2473
2474 FLOWMAP_FOR_EACH_INDEX (idx, flowmap_or(a->map, b->map)) {
2475 if ((flowmap_is_set(&a->map, idx) ? *ap++ : 0)
2476 != (flowmap_is_set(&b->map, idx) ? *bp++ : 0)) {
2477 return false;
2478 }
2479 }
2480 }
2481
2482 return true;
2483 }
2484
2485 /* Returns false if 'a' and 'b' differ at the places where there are 1-bits
2486 * in 'mask', true otherwise. */
2487 bool
2488 miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
2489 const struct minimask *mask)
2490 {
2491 const uint64_t *p = miniflow_get_values(&mask->masks);
2492 size_t idx;
2493
2494 FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
2495 if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
2496 return false;
2497 }
2498 }
2499
2500 return true;
2501 }
2502
2503 /* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
2504 * in 'mask', false if they differ. */
2505 bool
2506 miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
2507 const struct minimask *mask)
2508 {
2509 const uint64_t *p = miniflow_get_values(&mask->masks);
2510 size_t idx;
2511
2512 FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
2513 if ((miniflow_get(a, idx) ^ flow_u64_value(b, idx)) & *p++) {
2514 return false;
2515 }
2516 }
2517
2518 return true;
2519 }
2520
2521 \f
2522 void
2523 minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
2524 {
2525 miniflow_init(&mask->masks, &wc->masks);
2526 }
2527
2528 /* Returns a minimask copy of 'wc'. The caller must eventually free the
2529 * returned minimask with free(). */
2530 struct minimask *
2531 minimask_create(const struct flow_wildcards *wc)
2532 {
2533 return (struct minimask *)miniflow_create(&wc->masks);
2534 }
2535
2536 /* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
2537 *
2538 * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', which
2539 * must follow '*dst_' in memory, for use by 'dst_'. The caller must *not*
2540  * free 'dst_' with free(). */
2541 void
2542 minimask_combine(struct minimask *dst_,
2543 const struct minimask *a_, const struct minimask *b_,
2544 uint64_t storage[FLOW_U64S])
2545 {
2546 struct miniflow *dst = &dst_->masks;
2547 uint64_t *dst_values = storage;
2548 const struct miniflow *a = &a_->masks;
2549 const struct miniflow *b = &b_->masks;
2550 size_t idx;
2551
2552 flowmap_init(&dst->map);
2553
2554 FLOWMAP_FOR_EACH_INDEX(idx, flowmap_and(a->map, b->map)) {
2555 /* Both 'a' and 'b' have non-zero data at 'idx'. */
2556 uint64_t mask = *miniflow_get__(a, idx) & *miniflow_get__(b, idx);
2557
2558 if (mask) {
2559 flowmap_set(&dst->map, idx, 1);
2560 *dst_values++ = mask;
2561 }
2562 }
2563 }
2564
2565 /* Initializes 'wc' as a copy of 'mask'. */
2566 void
2567 minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
2568 {
2569 miniflow_expand(&mask->masks, &wc->masks);
2570 }
2571
2572 /* Returns true if 'a' and 'b' are the same flow mask, false otherwise.
2573 * Minimasks may not have zero data values, so for the minimasks to be the
2574 * same, they need to have the same map and the same data values. */
2575 bool
2576 minimask_equal(const struct minimask *a, const struct minimask *b)
2577 {
2578 return !memcmp(a, b, sizeof *a
2579 + MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
2580 }
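
/* The single memcmp above suffices because minimasks never store zero data
 * values: if the maps match, the value counts match too, so comparing the
 * struct and its inline data in one pass covers both. */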
2581
2582 /* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
2583 * false otherwise. */
2584 bool
2585 minimask_has_extra(const struct minimask *a, const struct minimask *b)
2586 {
2587 const uint64_t *bp = miniflow_get_values(&b->masks);
2588 size_t idx;
2589
2590 FLOWMAP_FOR_EACH_INDEX(idx, b->masks.map) {
2591 uint64_t b_u64 = *bp++;
2592
2593 /* 'b_u64' is non-zero, check if the data in 'a' is either zero
2594 * or misses some of the bits in 'b_u64'. */
2595 if (!MINIFLOW_IN_MAP(&a->masks, idx)
2596 || ((*miniflow_get__(&a->masks, idx) & b_u64) != b_u64)) {
2597 return true; /* 'a' wildcards some bits 'b' doesn't. */
2598 }
2599 }
2600
2601 return false;
2602 }