/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include <sys/types.h>
#include "flow.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <stdlib.h>
#include <string.h>
#include "byte-order.h"
#include "coverage.h"
#include "csum.h"
#include "dynamic-string.h"
#include "hash.h"
#include "ofpbuf.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "unaligned.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(flow);

COVERAGE_DEFINE(flow_extract);

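/* Each of the pull_*() helpers below removes the given protocol's header from
 * the front of 'packet' and returns a pointer to it, or returns a null
 * pointer, leaving 'packet' unmodified, if 'packet' is too short to hold such
 * a header.  pull_ip() and pull_tcp() take the variable header length (IHL,
 * TCP data offset) into account. */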
static struct arp_eth_header *
pull_arp(struct ofpbuf *packet)
{
    return ofpbuf_try_pull(packet, ARP_ETH_HEADER_LEN);
}

static struct ip_header *
pull_ip(struct ofpbuf *packet)
{
    if (packet->size >= IP_HEADER_LEN) {
        struct ip_header *ip = packet->data;
        int ip_len = IP_IHL(ip->ip_ihl_ver) * 4;
        if (ip_len >= IP_HEADER_LEN && packet->size >= ip_len) {
            return ofpbuf_pull(packet, ip_len);
        }
    }
    return NULL;
}

static struct tcp_header *
pull_tcp(struct ofpbuf *packet)
{
    if (packet->size >= TCP_HEADER_LEN) {
        struct tcp_header *tcp = packet->data;
        int tcp_len = TCP_OFFSET(tcp->tcp_ctl) * 4;
        if (tcp_len >= TCP_HEADER_LEN && packet->size >= tcp_len) {
            return ofpbuf_pull(packet, tcp_len);
        }
    }
    return NULL;
}

static struct udp_header *
pull_udp(struct ofpbuf *packet)
{
    return ofpbuf_try_pull(packet, UDP_HEADER_LEN);
}

static struct icmp_header *
pull_icmp(struct ofpbuf *packet)
{
    return ofpbuf_try_pull(packet, ICMP_HEADER_LEN);
}

static struct icmp6_hdr *
pull_icmpv6(struct ofpbuf *packet)
{
    return ofpbuf_try_pull(packet, sizeof(struct icmp6_hdr));
}

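/* If 'b' begins with a complete 802.1Q header (TPID plus TCI) followed by at
 * least an Ethertype, pulls the 802.1Q header from 'b' and stores its TCI,
 * with the CFI bit forced on, in 'flow->vlan_tci'.  (The CFI bit marks the
 * presence of a tag at all; see, e.g., the check in flow_compose().) */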
static void
parse_vlan(struct ofpbuf *b, struct flow *flow)
{
    struct qtag_prefix {
        ovs_be16 eth_type;      /* ETH_TYPE_VLAN */
        ovs_be16 tci;
    };

    if (b->size >= sizeof(struct qtag_prefix) + sizeof(ovs_be16)) {
        struct qtag_prefix *qp = ofpbuf_pull(b, sizeof *qp);
        flow->vlan_tci = qp->tci | htons(VLAN_CFI);
    }
}

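/* Pulls the Ethernet type/length field from 'b'.  For an Ethernet II frame,
 * returns its Ethertype directly.  For an 802.2 frame with an LLC/SNAP header
 * that uses the Ethernet OUI, also pulls the LLC/SNAP header and returns the
 * encapsulated protocol type.  Otherwise returns FLOW_DL_TYPE_NONE. */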
static ovs_be16
parse_ethertype(struct ofpbuf *b)
{
    struct llc_snap_header *llc;
    ovs_be16 proto;

    proto = *(ovs_be16 *) ofpbuf_pull(b, sizeof proto);
    if (ntohs(proto) >= ETH_TYPE_MIN) {
        return proto;
    }

    if (b->size < sizeof *llc) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    llc = b->data;
    if (llc->llc.llc_dsap != LLC_DSAP_SNAP
        || llc->llc.llc_ssap != LLC_SSAP_SNAP
        || llc->llc.llc_cntl != LLC_CNTL_SNAP
        || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
                  sizeof llc->snap.snap_org)) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    ofpbuf_pull(b, sizeof *llc);
    return llc->snap.snap_type;
}

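/* Parses the IPv6 header at the start of 'packet', followed by any
 * hop-by-hop, routing, destination options, AH, and fragment extension
 * headers, pulling them from 'packet' and filling in the IPv6 fields of
 * 'flow'.  Returns 0 if successful, otherwise EINVAL if the headers are
 * truncated. */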
static int
parse_ipv6(struct ofpbuf *packet, struct flow *flow)
{
    const struct ip6_hdr *nh;
    ovs_be32 tc_flow;
    int nexthdr;

    nh = ofpbuf_try_pull(packet, sizeof *nh);
    if (!nh) {
        return EINVAL;
    }

    nexthdr = nh->ip6_nxt;

    flow->ipv6_src = nh->ip6_src;
    flow->ipv6_dst = nh->ip6_dst;

    tc_flow = get_unaligned_be32(&nh->ip6_flow);
    flow->nw_tos = ntohl(tc_flow) >> 20;
    flow->ipv6_label = tc_flow & htonl(IPV6_LABEL_MASK);
    flow->nw_ttl = nh->ip6_hlim;
    flow->nw_proto = IPPROTO_NONE;

    while (1) {
        if ((nexthdr != IPPROTO_HOPOPTS)
            && (nexthdr != IPPROTO_ROUTING)
            && (nexthdr != IPPROTO_DSTOPTS)
            && (nexthdr != IPPROTO_AH)
            && (nexthdr != IPPROTO_FRAGMENT)) {
            /* It's either a terminal header (e.g., TCP, UDP) or one we
             * don't understand.  In either case, we're done with the
             * packet, so use it to fill in 'nw_proto'. */
            break;
        }

        /* We only verify that at least 8 bytes of the next header are
         * available, but many of these headers are longer.  Ensure that
         * accesses within the extension header are within those first 8
         * bytes.  All extension headers are required to be at least 8
         * bytes. */
        if (packet->size < 8) {
            return EINVAL;
        }

        if ((nexthdr == IPPROTO_HOPOPTS)
            || (nexthdr == IPPROTO_ROUTING)
            || (nexthdr == IPPROTO_DSTOPTS)) {
            /* These headers, while different, have the fields we care about
             * in the same location and with the same interpretation. */
            const struct ip6_ext *ext_hdr = packet->data;
            nexthdr = ext_hdr->ip6e_nxt;
            if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 1) * 8)) {
                return EINVAL;
            }
        } else if (nexthdr == IPPROTO_AH) {
            /* A standard AH definition isn't available, but the fields
             * we care about are in the same location as the generic
             * option header--only the header length is calculated
             * differently. */
            const struct ip6_ext *ext_hdr = packet->data;
            nexthdr = ext_hdr->ip6e_nxt;
            if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 2) * 4)) {
                return EINVAL;
            }
        } else if (nexthdr == IPPROTO_FRAGMENT) {
            const struct ip6_frag *frag_hdr = packet->data;

            nexthdr = frag_hdr->ip6f_nxt;
            if (!ofpbuf_try_pull(packet, sizeof *frag_hdr)) {
                return EINVAL;
            }

            /* We only process the first fragment. */
            if (frag_hdr->ip6f_offlg != htons(0)) {
                if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) == htons(0)) {
                    flow->nw_frag = FLOW_NW_FRAG_ANY;
                } else {
                    flow->nw_frag |= FLOW_NW_FRAG_LATER;
                    nexthdr = IPPROTO_FRAGMENT;
                    break;
                }
            }
        }
    }

    flow->nw_proto = nexthdr;
    return 0;
}

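/* parse_tcp() and parse_udp() pull the transport header from 'b', copy the
 * source and destination ports into 'flow', and point 'packet->l7' just past
 * the header.  If the header is truncated, 'flow' and 'packet->l7' are left
 * unchanged. */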
static void
parse_tcp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
{
    const struct tcp_header *tcp = pull_tcp(b);
    if (tcp) {
        flow->tp_src = tcp->tcp_src;
        flow->tp_dst = tcp->tcp_dst;
        packet->l7 = b->data;
    }
}

static void
parse_udp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
{
    const struct udp_header *udp = pull_udp(b);
    if (udp) {
        flow->tp_src = udp->udp_src;
        flow->tp_dst = udp->udp_dst;
        packet->l7 = b->data;
    }
}

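/* Pulls the ICMPv6 header from 'b' and stores its type and code in 'flow'.
 * For Neighbor Discovery solicitations and advertisements, also parses the
 * target address and any source/target link-layer address options.  Returns
 * true if successful, otherwise false (clearing the ND-related fields of
 * 'flow') if the message is malformed or truncated. */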
static bool
parse_icmpv6(struct ofpbuf *b, struct flow *flow)
{
    const struct icmp6_hdr *icmp = pull_icmpv6(b);

    if (!icmp) {
        return false;
    }

    /* The ICMPv6 type and code fields use the 16-bit transport port
     * fields, so we need to store them in 16-bit network byte order. */
    flow->tp_src = htons(icmp->icmp6_type);
    flow->tp_dst = htons(icmp->icmp6_code);

    if (icmp->icmp6_code == 0 &&
        (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
         icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
        const struct in6_addr *nd_target;

        nd_target = ofpbuf_try_pull(b, sizeof *nd_target);
        if (!nd_target) {
            return false;
        }
        flow->nd_target = *nd_target;

        while (b->size >= 8) {
            /* The minimum size of an option is 8 bytes, which also is
             * the size of Ethernet link-layer options. */
            const struct nd_opt_hdr *nd_opt = b->data;
            int opt_len = nd_opt->nd_opt_len * 8;

            if (!opt_len || opt_len > b->size) {
                goto invalid;
            }

            /* Store the link layer address if the appropriate option is
             * provided.  It is considered an error if the same link
             * layer option is specified twice. */
            if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
                && opt_len == 8) {
                if (eth_addr_is_zero(flow->arp_sha)) {
                    memcpy(flow->arp_sha, nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
                       && opt_len == 8) {
                if (eth_addr_is_zero(flow->arp_tha)) {
                    memcpy(flow->arp_tha, nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            }

            if (!ofpbuf_try_pull(b, opt_len)) {
                goto invalid;
            }
        }
    }

    return true;

invalid:
    memset(&flow->nd_target, 0, sizeof(flow->nd_target));
    memset(flow->arp_sha, 0, sizeof(flow->arp_sha));
    memset(flow->arp_tha, 0, sizeof(flow->arp_tha));

    return false;

}

/* Initializes 'flow' members from 'packet', 'skb_priority', 'tun_id', and
 * 'ofp_in_port'.
 *
 * Initializes 'packet' header pointers as follows:
 *
 *    - packet->l2 to the start of the Ethernet header.
 *
 *    - packet->l3 to just past the Ethernet header, or just past the
 *      vlan_header if one is present, to the first byte of the payload of the
 *      Ethernet frame.
 *
 *    - packet->l4 to just past the IPv4 header, if one is present and has a
 *      correct length, and otherwise NULL.
 *
 *    - packet->l7 to just past the TCP or UDP or ICMP header, if one is
 *      present and has a correct length, and otherwise NULL.
 */
void
flow_extract(struct ofpbuf *packet, uint32_t skb_priority, ovs_be64 tun_id,
             uint16_t ofp_in_port, struct flow *flow)
{
    struct ofpbuf b = *packet;
    struct eth_header *eth;

    COVERAGE_INC(flow_extract);

    memset(flow, 0, sizeof *flow);
    flow->tun_id = tun_id;
    flow->in_port = ofp_in_port;
    flow->skb_priority = skb_priority;

    packet->l2 = b.data;
    packet->l3 = NULL;
    packet->l4 = NULL;
    packet->l7 = NULL;

    if (b.size < sizeof *eth) {
        return;
    }

    /* Link layer. */
    eth = b.data;
    memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
    memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);

    /* dl_type, vlan_tci. */
    ofpbuf_pull(&b, ETH_ADDR_LEN * 2);
    if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
        parse_vlan(&b, flow);
    }
    flow->dl_type = parse_ethertype(&b);

    /* Network layer. */
    packet->l3 = b.data;
    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        const struct ip_header *nh = pull_ip(&b);
        if (nh) {
            packet->l4 = b.data;

            flow->nw_src = get_unaligned_be32(&nh->ip_src);
            flow->nw_dst = get_unaligned_be32(&nh->ip_dst);
            flow->nw_proto = nh->ip_proto;

            flow->nw_tos = nh->ip_tos;
            if (IP_IS_FRAGMENT(nh->ip_frag_off)) {
                flow->nw_frag = FLOW_NW_FRAG_ANY;
                if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
                    flow->nw_frag |= FLOW_NW_FRAG_LATER;
                }
            }
            flow->nw_ttl = nh->ip_ttl;

            if (!(nh->ip_frag_off & htons(IP_FRAG_OFF_MASK))) {
                if (flow->nw_proto == IPPROTO_TCP) {
                    parse_tcp(packet, &b, flow);
                } else if (flow->nw_proto == IPPROTO_UDP) {
                    parse_udp(packet, &b, flow);
                } else if (flow->nw_proto == IPPROTO_ICMP) {
                    const struct icmp_header *icmp = pull_icmp(&b);
                    if (icmp) {
                        flow->tp_src = htons(icmp->icmp_type);
                        flow->tp_dst = htons(icmp->icmp_code);
                        packet->l7 = b.data;
                    }
                }
            }
        }
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        if (parse_ipv6(&b, flow)) {
            return;
        }

        packet->l4 = b.data;
        if (flow->nw_proto == IPPROTO_TCP) {
            parse_tcp(packet, &b, flow);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            parse_udp(packet, &b, flow);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            if (parse_icmpv6(&b, flow)) {
                packet->l7 = b.data;
            }
        }
    } else if (flow->dl_type == htons(ETH_TYPE_ARP)) {
        const struct arp_eth_header *arp = pull_arp(&b);
        if (arp && arp->ar_hrd == htons(1)
            && arp->ar_pro == htons(ETH_TYPE_IP)
            && arp->ar_hln == ETH_ADDR_LEN
            && arp->ar_pln == 4) {
            /* We only match on the lower 8 bits of the opcode. */
            if (ntohs(arp->ar_op) <= 0xff) {
                flow->nw_proto = ntohs(arp->ar_op);
            }

            if ((flow->nw_proto == ARP_OP_REQUEST)
                || (flow->nw_proto == ARP_OP_REPLY)) {
                flow->nw_src = arp->ar_spa;
                flow->nw_dst = arp->ar_tpa;
                memcpy(flow->arp_sha, arp->ar_sha, ETH_ADDR_LEN);
                memcpy(flow->arp_tha, arp->ar_tha, ETH_ADDR_LEN);
            }
        }
    }
}
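
/* Example (illustrative sketch, not part of the original code): a caller that
 * has received an Ethernet frame into an ofpbuf 'packet' on OpenFlow port
 * 'ofp_in_port' (both assumed to be supplied by the caller) might extract its
 * flow like this:
 *
 *     struct flow flow;
 *
 *     flow_extract(packet, 0, htonll(0), ofp_in_port, &flow);
 *
 * after which 'flow' can be used for classifier lookups, and packet->l3,
 * packet->l4, and packet->l7 point to the parsed headers (or are NULL). */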

/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero. */
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
    const flow_wildcards_t wc = wildcards->wildcards;
    int i;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    for (i = 0; i < FLOW_N_REGS; i++) {
        flow->regs[i] &= wildcards->reg_masks[i];
    }
    flow->tun_id &= wildcards->tun_id_mask;
    flow->metadata &= wildcards->metadata_mask;
    flow->nw_src &= wildcards->nw_src_mask;
    flow->nw_dst &= wildcards->nw_dst_mask;
    if (wc & FWW_IN_PORT) {
        flow->in_port = 0;
    }
    flow->vlan_tci &= wildcards->vlan_tci_mask;
    if (wc & FWW_DL_TYPE) {
        flow->dl_type = htons(0);
    }
    flow->tp_src &= wildcards->tp_src_mask;
    flow->tp_dst &= wildcards->tp_dst_mask;
    eth_addr_bitand(flow->dl_src, wildcards->dl_src_mask, flow->dl_src);
    eth_addr_bitand(flow->dl_dst, wildcards->dl_dst_mask, flow->dl_dst);
    if (wc & FWW_NW_PROTO) {
        flow->nw_proto = 0;
    }
    flow->ipv6_label &= wildcards->ipv6_label_mask;
    flow->nw_tos &= wildcards->nw_tos_mask;
    flow->nw_ttl &= wildcards->nw_ttl_mask;
    flow->nw_frag &= wildcards->nw_frag_mask;
    eth_addr_bitand(flow->arp_sha, wildcards->arp_sha_mask, flow->arp_sha);
    eth_addr_bitand(flow->arp_tha, wildcards->arp_tha_mask, flow->arp_tha);
    flow->ipv6_src = ipv6_addr_bitand(&flow->ipv6_src,
                                      &wildcards->ipv6_src_mask);
    flow->ipv6_dst = ipv6_addr_bitand(&flow->ipv6_dst,
                                      &wildcards->ipv6_dst_mask);
    flow->nd_target = ipv6_addr_bitand(&flow->nd_target,
                                       &wildcards->nd_target_mask);
    flow->skb_priority = 0;
}

/* Initializes 'fmd' with the metadata found in 'flow'. */
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    fmd->tun_id = flow->tun_id;
    fmd->metadata = flow->metadata;
    memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
    fmd->in_port = flow->in_port;
}

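/* Returns a string that describes 'flow', suitable for debug logging.  The
 * caller must free the returned string (with free()). */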
char *
flow_to_string(const struct flow *flow)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    flow_format(&ds, flow);
    return ds_cstr(&ds);
}

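/* Appends a human-readable representation of 'flow' to 'ds'. */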
void
flow_format(struct ds *ds, const struct flow *flow)
{
    ds_put_format(ds, "priority:%"PRIu32
                      ",tunnel:%#"PRIx64
                      ",metadata:%#"PRIx64
                      ",in_port:%04"PRIx16,
                  flow->skb_priority,
                  ntohll(flow->tun_id),
                  ntohll(flow->metadata),
                  flow->in_port);

    ds_put_format(ds, ",tci(");
    if (flow->vlan_tci) {
        ds_put_format(ds, "vlan:%"PRIu16",pcp:%d",
                      vlan_tci_to_vid(flow->vlan_tci),
                      vlan_tci_to_pcp(flow->vlan_tci));
    } else {
        ds_put_char(ds, '0');
    }
    ds_put_format(ds, ") mac("ETH_ADDR_FMT"->"ETH_ADDR_FMT
                      ") type:%04"PRIx16,
                  ETH_ADDR_ARGS(flow->dl_src),
                  ETH_ADDR_ARGS(flow->dl_dst),
                  ntohs(flow->dl_type));

    if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        ds_put_format(ds, " label:%#"PRIx32" proto:%"PRIu8" tos:%#"PRIx8
                          " ttl:%"PRIu8" ipv6(",
                      ntohl(flow->ipv6_label), flow->nw_proto,
                      flow->nw_tos, flow->nw_ttl);
        print_ipv6_addr(ds, &flow->ipv6_src);
        ds_put_cstr(ds, "->");
        print_ipv6_addr(ds, &flow->ipv6_dst);
        ds_put_char(ds, ')');
    } else {
        ds_put_format(ds, " proto:%"PRIu8" tos:%#"PRIx8" ttl:%"PRIu8
                          " ip("IP_FMT"->"IP_FMT")",
                      flow->nw_proto, flow->nw_tos, flow->nw_ttl,
                      IP_ARGS(&flow->nw_src), IP_ARGS(&flow->nw_dst));
    }
    if (flow->nw_frag) {
        ds_put_format(ds, " frag(%s)",
                      flow->nw_frag == FLOW_NW_FRAG_ANY ? "first"
                      : flow->nw_frag == (FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER)
                      ? "later" : "<error>");
    }
    if (flow->tp_src || flow->tp_dst) {
        ds_put_format(ds, " port(%"PRIu16"->%"PRIu16")",
                      ntohs(flow->tp_src), ntohs(flow->tp_dst));
    }
    if (!eth_addr_is_zero(flow->arp_sha) || !eth_addr_is_zero(flow->arp_tha)) {
        ds_put_format(ds, " arp_ha("ETH_ADDR_FMT"->"ETH_ADDR_FMT")",
                      ETH_ADDR_ARGS(flow->arp_sha),
                      ETH_ADDR_ARGS(flow->arp_tha));
    }
}

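/* Writes a human-readable representation of 'flow' to 'stream'. */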
void
flow_print(FILE *stream, const struct flow *flow)
{
    char *s = flow_to_string(flow);
    fputs(s, stream);
    free(s);
}

/* flow_wildcards functions. */

/* Initializes 'wc' as a set of wildcards that matches every packet. */
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    wc->wildcards = FWW_ALL;
    wc->tun_id_mask = htonll(0);
    wc->nw_src_mask = htonl(0);
    wc->nw_dst_mask = htonl(0);
    wc->ipv6_src_mask = in6addr_any;
    wc->ipv6_dst_mask = in6addr_any;
    wc->ipv6_label_mask = htonl(0);
    wc->nd_target_mask = in6addr_any;
    memset(wc->reg_masks, 0, sizeof wc->reg_masks);
    wc->metadata_mask = htonll(0);
    wc->vlan_tci_mask = htons(0);
    wc->nw_frag_mask = 0;
    wc->tp_src_mask = htons(0);
    wc->tp_dst_mask = htons(0);
    memset(wc->dl_src_mask, 0, ETH_ADDR_LEN);
    memset(wc->dl_dst_mask, 0, ETH_ADDR_LEN);
    memset(wc->arp_sha_mask, 0, ETH_ADDR_LEN);
    memset(wc->arp_tha_mask, 0, ETH_ADDR_LEN);
    wc->nw_tos_mask = 0;
    wc->nw_ttl_mask = 0;
    memset(wc->zeros, 0, sizeof wc->zeros);
}

/* Initializes 'wc' as an exact-match set of wildcards; that is, 'wc' does not
 * wildcard any bits or fields. */
void
flow_wildcards_init_exact(struct flow_wildcards *wc)
{
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    wc->wildcards = 0;
    wc->tun_id_mask = htonll(UINT64_MAX);
    wc->nw_src_mask = htonl(UINT32_MAX);
    wc->nw_dst_mask = htonl(UINT32_MAX);
    wc->ipv6_src_mask = in6addr_exact;
    wc->ipv6_dst_mask = in6addr_exact;
    wc->ipv6_label_mask = htonl(UINT32_MAX);
    wc->nd_target_mask = in6addr_exact;
    memset(wc->reg_masks, 0xff, sizeof wc->reg_masks);
    wc->metadata_mask = htonll(UINT64_MAX);
    wc->vlan_tci_mask = htons(UINT16_MAX);
    wc->nw_frag_mask = UINT8_MAX;
    wc->tp_src_mask = htons(UINT16_MAX);
    wc->tp_dst_mask = htons(UINT16_MAX);
    memset(wc->dl_src_mask, 0xff, ETH_ADDR_LEN);
    memset(wc->dl_dst_mask, 0xff, ETH_ADDR_LEN);
    memset(wc->arp_sha_mask, 0xff, ETH_ADDR_LEN);
    memset(wc->arp_tha_mask, 0xff, ETH_ADDR_LEN);
    wc->nw_tos_mask = UINT8_MAX;
    wc->nw_ttl_mask = UINT8_MAX;
    memset(wc->zeros, 0, sizeof wc->zeros);
}

/* Returns true if 'wc' is exact-match, false if 'wc' wildcards any bits or
 * fields. */
bool
flow_wildcards_is_exact(const struct flow_wildcards *wc)
{
    int i;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    if (wc->wildcards
        || wc->tun_id_mask != htonll(UINT64_MAX)
        || wc->nw_src_mask != htonl(UINT32_MAX)
        || wc->nw_dst_mask != htonl(UINT32_MAX)
        || wc->tp_src_mask != htons(UINT16_MAX)
        || wc->tp_dst_mask != htons(UINT16_MAX)
        || wc->vlan_tci_mask != htons(UINT16_MAX)
        || wc->metadata_mask != htonll(UINT64_MAX)
        || !eth_mask_is_exact(wc->dl_src_mask)
        || !eth_mask_is_exact(wc->dl_dst_mask)
        || !eth_mask_is_exact(wc->arp_sha_mask)
        || !eth_mask_is_exact(wc->arp_tha_mask)
        || !ipv6_mask_is_exact(&wc->ipv6_src_mask)
        || !ipv6_mask_is_exact(&wc->ipv6_dst_mask)
        || wc->ipv6_label_mask != htonl(UINT32_MAX)
        || !ipv6_mask_is_exact(&wc->nd_target_mask)
        || wc->nw_frag_mask != UINT8_MAX
        || wc->nw_tos_mask != UINT8_MAX
        || wc->nw_ttl_mask != UINT8_MAX) {
        return false;
    }

    for (i = 0; i < FLOW_N_REGS; i++) {
        if (wc->reg_masks[i] != UINT32_MAX) {
            return false;
        }
    }

    return true;
}

/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
 * fields. */
bool
flow_wildcards_is_catchall(const struct flow_wildcards *wc)
{
    int i;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    if (wc->wildcards != FWW_ALL
        || wc->tun_id_mask != htonll(0)
        || wc->nw_src_mask != htonl(0)
        || wc->nw_dst_mask != htonl(0)
        || wc->tp_src_mask != htons(0)
        || wc->tp_dst_mask != htons(0)
        || wc->vlan_tci_mask != htons(0)
        || wc->metadata_mask != htonll(0)
        || !eth_addr_is_zero(wc->dl_src_mask)
        || !eth_addr_is_zero(wc->dl_dst_mask)
        || !eth_addr_is_zero(wc->arp_sha_mask)
        || !eth_addr_is_zero(wc->arp_tha_mask)
        || !ipv6_mask_is_any(&wc->ipv6_src_mask)
        || !ipv6_mask_is_any(&wc->ipv6_dst_mask)
        || wc->ipv6_label_mask != htonl(0)
        || !ipv6_mask_is_any(&wc->nd_target_mask)
        || wc->nw_frag_mask != 0
        || wc->nw_tos_mask != 0
        || wc->nw_ttl_mask != 0) {
        return false;
    }

    for (i = 0; i < FLOW_N_REGS; i++) {
        if (wc->reg_masks[i] != 0) {
            return false;
        }
    }

    return true;
}

/* Initializes 'dst' as the combination of wildcards in 'src1' and 'src2'.
 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded in
 * 'src1' or 'src2' or both. */
void
flow_wildcards_combine(struct flow_wildcards *dst,
                       const struct flow_wildcards *src1,
                       const struct flow_wildcards *src2)
{
    int i;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    dst->wildcards = src1->wildcards | src2->wildcards;
    dst->tun_id_mask = src1->tun_id_mask & src2->tun_id_mask;
    dst->nw_src_mask = src1->nw_src_mask & src2->nw_src_mask;
    dst->nw_dst_mask = src1->nw_dst_mask & src2->nw_dst_mask;
    dst->ipv6_src_mask = ipv6_addr_bitand(&src1->ipv6_src_mask,
                                          &src2->ipv6_src_mask);
    dst->ipv6_dst_mask = ipv6_addr_bitand(&src1->ipv6_dst_mask,
                                          &src2->ipv6_dst_mask);
    dst->ipv6_label_mask = src1->ipv6_label_mask & src2->ipv6_label_mask;
    dst->nd_target_mask = ipv6_addr_bitand(&src1->nd_target_mask,
                                           &src2->nd_target_mask);
    for (i = 0; i < FLOW_N_REGS; i++) {
        dst->reg_masks[i] = src1->reg_masks[i] & src2->reg_masks[i];
    }
    dst->metadata_mask = src1->metadata_mask & src2->metadata_mask;
    dst->vlan_tci_mask = src1->vlan_tci_mask & src2->vlan_tci_mask;
    dst->tp_src_mask = src1->tp_src_mask & src2->tp_src_mask;
    dst->tp_dst_mask = src1->tp_dst_mask & src2->tp_dst_mask;
    dst->nw_frag_mask = src1->nw_frag_mask & src2->nw_frag_mask;
    eth_addr_bitand(src1->dl_src_mask, src2->dl_src_mask, dst->dl_src_mask);
    eth_addr_bitand(src1->dl_dst_mask, src2->dl_dst_mask, dst->dl_dst_mask);
    eth_addr_bitand(src1->arp_sha_mask, src2->arp_sha_mask, dst->arp_sha_mask);
    eth_addr_bitand(src1->arp_tha_mask, src2->arp_tha_mask, dst->arp_tha_mask);
    dst->nw_tos_mask = src1->nw_tos_mask & src2->nw_tos_mask;
    dst->nw_ttl_mask = src1->nw_ttl_mask & src2->nw_ttl_mask;
}

/* Returns a hash of the wildcards in 'wc'. */
uint32_t
flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
{
    /* If you change struct flow_wildcards and thereby trigger this
     * assertion, please check that the new struct flow_wildcards has no holes
     * in it before you update the assertion. */
    BUILD_ASSERT_DECL(sizeof *wc == 120 + FLOW_N_REGS * 4);
    return hash_bytes(wc, sizeof *wc, basis);
}

/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
 * different. */
bool
flow_wildcards_equal(const struct flow_wildcards *a,
                     const struct flow_wildcards *b)
{
    int i;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    if (a->wildcards != b->wildcards
        || a->tun_id_mask != b->tun_id_mask
        || a->nw_src_mask != b->nw_src_mask
        || a->nw_dst_mask != b->nw_dst_mask
        || a->vlan_tci_mask != b->vlan_tci_mask
        || a->metadata_mask != b->metadata_mask
        || !ipv6_addr_equals(&a->ipv6_src_mask, &b->ipv6_src_mask)
        || !ipv6_addr_equals(&a->ipv6_dst_mask, &b->ipv6_dst_mask)
        || a->ipv6_label_mask != b->ipv6_label_mask
        || !ipv6_addr_equals(&a->nd_target_mask, &b->nd_target_mask)
        || a->tp_src_mask != b->tp_src_mask
        || a->tp_dst_mask != b->tp_dst_mask
        || a->nw_frag_mask != b->nw_frag_mask
        || !eth_addr_equals(a->dl_src_mask, b->dl_src_mask)
        || !eth_addr_equals(a->dl_dst_mask, b->dl_dst_mask)
        || !eth_addr_equals(a->arp_sha_mask, b->arp_sha_mask)
        || !eth_addr_equals(a->arp_tha_mask, b->arp_tha_mask)
        || a->nw_tos_mask != b->nw_tos_mask
        || a->nw_ttl_mask != b->nw_ttl_mask) {
        return false;
    }

    for (i = 0; i < FLOW_N_REGS; i++) {
        if (a->reg_masks[i] != b->reg_masks[i]) {
            return false;
        }
    }

    return true;
}

/* Returns true if at least one bit or field is wildcarded in 'a' but not in
 * 'b', false otherwise. */
bool
flow_wildcards_has_extra(const struct flow_wildcards *a,
                         const struct flow_wildcards *b)
{
    int i;
    uint8_t eth_masked[ETH_ADDR_LEN];
    struct in6_addr ipv6_masked;

    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 15);

    for (i = 0; i < FLOW_N_REGS; i++) {
        if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) {
            return true;
        }
    }

    eth_addr_bitand(a->dl_src_mask, b->dl_src_mask, eth_masked);
    if (!eth_addr_equals(eth_masked, b->dl_src_mask)) {
        return true;
    }

    eth_addr_bitand(a->dl_dst_mask, b->dl_dst_mask, eth_masked);
    if (!eth_addr_equals(eth_masked, b->dl_dst_mask)) {
        return true;
    }

    eth_addr_bitand(a->arp_sha_mask, b->arp_sha_mask, eth_masked);
    if (!eth_addr_equals(eth_masked, b->arp_sha_mask)) {
        return true;
    }

    eth_addr_bitand(a->arp_tha_mask, b->arp_tha_mask, eth_masked);
    if (!eth_addr_equals(eth_masked, b->arp_tha_mask)) {
        return true;
    }

    ipv6_masked = ipv6_addr_bitand(&a->ipv6_src_mask, &b->ipv6_src_mask);
    if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_src_mask)) {
        return true;
    }

    ipv6_masked = ipv6_addr_bitand(&a->ipv6_dst_mask, &b->ipv6_dst_mask);
    if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_dst_mask)) {
        return true;
    }

    ipv6_masked = ipv6_addr_bitand(&a->nd_target_mask, &b->nd_target_mask);
    if (!ipv6_addr_equals(&ipv6_masked, &b->nd_target_mask)) {
        return true;
    }

    return (a->wildcards & ~b->wildcards
            || (a->tun_id_mask & b->tun_id_mask) != b->tun_id_mask
            || (a->nw_src_mask & b->nw_src_mask) != b->nw_src_mask
            || (a->nw_dst_mask & b->nw_dst_mask) != b->nw_dst_mask
            || (a->ipv6_label_mask & b->ipv6_label_mask) != b->ipv6_label_mask
            || (a->vlan_tci_mask & b->vlan_tci_mask) != b->vlan_tci_mask
            || (a->metadata_mask & b->metadata_mask) != b->metadata_mask
            || (a->tp_src_mask & b->tp_src_mask) != b->tp_src_mask
            || (a->tp_dst_mask & b->tp_dst_mask) != b->tp_dst_mask
            || (a->nw_frag_mask & b->nw_frag_mask) != b->nw_frag_mask
            || (a->nw_tos_mask & b->nw_tos_mask) != b->nw_tos_mask
            || (a->nw_ttl_mask & b->nw_ttl_mask) != b->nw_ttl_mask);
}

/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
 * (A 0-bit indicates a wildcard bit.) */
void
flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
{
    wc->reg_masks[idx] = mask;
}

/* Hashes 'flow' based on its L2 through L4 protocol information. */
uint32_t
flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
{
    struct {
        union {
            ovs_be32 ipv4_addr;
            struct in6_addr ipv6_addr;
        };
        ovs_be16 eth_type;
        ovs_be16 vlan_tci;
        ovs_be16 tp_port;
        uint8_t eth_addr[ETH_ADDR_LEN];
        uint8_t ip_proto;
    } fields;

    int i;

    memset(&fields, 0, sizeof fields);
    for (i = 0; i < ETH_ADDR_LEN; i++) {
        fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
    }
    fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
    fields.eth_type = flow->dl_type;

    /* UDP source and destination port are not taken into account because they
     * will not necessarily be symmetric in a bidirectional flow. */
    if (fields.eth_type == htons(ETH_TYPE_IP)) {
        fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
        const uint8_t *a = &flow->ipv6_src.s6_addr[0];
        const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
        uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];

        for (i = 0; i < 16; i++) {
            ipv6_addr[i] = a[i] ^ b[i];
        }
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    }
    return hash_bytes(&fields, sizeof fields, basis);
}
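
/* A note on flow_hash_symmetric_l4() above: because the Ethernet addresses,
 * IP addresses, and TCP ports are combined with XOR, a flow and its reverse
 * (source and destination swapped) hash to the same value, so the hash is
 * independent of the direction of the traffic. */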

/* Hashes the portions of 'flow' designated by 'fields'. */
uint32_t
flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
                 uint16_t basis)
{
    switch (fields) {

    case NX_HASH_FIELDS_ETH_SRC:
        return hash_bytes(flow->dl_src, sizeof flow->dl_src, basis);

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        return flow_hash_symmetric_l4(flow, basis);
    }

    NOT_REACHED();
}

/* Returns a string representation of 'fields'. */
const char *
flow_hash_fields_to_str(enum nx_hash_fields fields)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
    case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
    default: return "<unknown>";
    }
}

/* Returns true if the value of 'fields' is supported.  Otherwise false. */
bool
flow_hash_fields_valid(enum nx_hash_fields fields)
{
    return fields == NX_HASH_FIELDS_ETH_SRC
        || fields == NX_HASH_FIELDS_SYMMETRIC_L4;
}

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.0 "dl_vlan" value:
 *
 *    - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
 *      that VLAN.  Any existing PCP match is unchanged (it becomes 0 if
 *      'flow' previously matched packets without a VLAN header).
 *
 *    - If it is OFP10_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
 *      without a VLAN tag.
 *
 *    - Other values of 'vid' should not be used. */
void
flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
{
    if (vid == htons(OFP10_VLAN_NONE)) {
        flow->vlan_tci = htons(0);
    } else {
        vid &= htons(VLAN_VID_MASK);
        flow->vlan_tci &= ~htons(VLAN_VID_MASK);
        flow->vlan_tci |= htons(VLAN_CFI) | vid;
    }
}

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
 * plus CFI). */
void
flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
{
    ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
    flow->vlan_tci &= ~mask;
    flow->vlan_tci |= vid & mask;
}

/* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
 * range 0...7.
 *
 * This function has no effect on the VLAN ID that 'flow' matches.
 *
 * After calling this function, 'flow' will not match packets without a VLAN
 * header. */
void
flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
{
    pcp &= 0x07;
    flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
    flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
}

/* Puts into 'b' a packet that flow_extract() would parse as having the given
 * 'flow'.
 *
 * (This is useful only for testing, obviously, and the packet isn't really
 * valid.  It hasn't got some checksums filled in, for one, and lots of fields
 * are just zeroed.) */
void
flow_compose(struct ofpbuf *b, const struct flow *flow)
{
    eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
    if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
        struct eth_header *eth = b->l2;
        eth->eth_type = htons(b->size);
        return;
    }

    if (flow->vlan_tci & htons(VLAN_CFI)) {
        eth_push_vlan(b, flow->vlan_tci);
    }

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *ip;

        b->l3 = ip = ofpbuf_put_zeros(b, sizeof *ip);
        ip->ip_ihl_ver = IP_IHL_VER(5, 4);
        ip->ip_tos = flow->nw_tos;
        ip->ip_proto = flow->nw_proto;
        ip->ip_src = flow->nw_src;
        ip->ip_dst = flow->nw_dst;

        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
            ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
            if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
                ip->ip_frag_off |= htons(100);
            }
        }
        if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
            || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
            if (flow->nw_proto == IPPROTO_TCP) {
                struct tcp_header *tcp;

                b->l4 = tcp = ofpbuf_put_zeros(b, sizeof *tcp);
                tcp->tcp_src = flow->tp_src;
                tcp->tcp_dst = flow->tp_dst;
                tcp->tcp_ctl = TCP_CTL(0, 5);
            } else if (flow->nw_proto == IPPROTO_UDP) {
                struct udp_header *udp;

                b->l4 = udp = ofpbuf_put_zeros(b, sizeof *udp);
                udp->udp_src = flow->tp_src;
                udp->udp_dst = flow->tp_dst;
            } else if (flow->nw_proto == IPPROTO_ICMP) {
                struct icmp_header *icmp;

                b->l4 = icmp = ofpbuf_put_zeros(b, sizeof *icmp);
                icmp->icmp_type = ntohs(flow->tp_src);
                icmp->icmp_code = ntohs(flow->tp_dst);
                icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
            }
        }

        ip = b->l3;
        ip->ip_tot_len = htons((uint8_t *) b->data + b->size
                               - (uint8_t *) b->l3);
        ip->ip_csum = csum(ip, sizeof *ip);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        /* XXX */
    } else if (flow->dl_type == htons(ETH_TYPE_ARP)) {
        struct arp_eth_header *arp;

        b->l3 = arp = ofpbuf_put_zeros(b, sizeof *arp);
        arp->ar_hrd = htons(1);
        arp->ar_pro = htons(ETH_TYPE_IP);
        arp->ar_hln = ETH_ADDR_LEN;
        arp->ar_pln = 4;
        arp->ar_op = htons(flow->nw_proto);

        if (flow->nw_proto == ARP_OP_REQUEST ||
            flow->nw_proto == ARP_OP_REPLY) {
            arp->ar_spa = flow->nw_src;
            arp->ar_tpa = flow->nw_dst;
            memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
            memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
        }
    }
}