]>
git.proxmox.com Git - ovs.git/blob - lib/classifier.c
5add17588dfed8f31e9d3e1dcfb7ffd448195838
2 * Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "classifier.h"
21 #include <netinet/in.h>
22 #include "byte-order.h"
23 #include "dynamic-string.h"
/* Internal helpers, defined below. */
static struct cls_table *find_table(const struct classifier *,
                                    const struct flow_wildcards *);
static struct cls_table *insert_table(struct classifier *,
                                      const struct flow_wildcards *);
static void destroy_table(struct classifier *, struct cls_table *);
static struct cls_rule *find_match(const struct cls_table *,
                                   const struct flow *);
static struct cls_rule *find_equal(struct cls_table *, const struct flow *,
                                   uint32_t hash);
static struct cls_rule *insert_rule(struct cls_table *, struct cls_rule *);
static bool flow_equal_except(const struct flow *, const struct flow *,
                              const struct flow_wildcards *);

/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. */
#define FOR_EACH_RULE_IN_LIST(RULE, HEAD)                               \
    for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))
/* As above, but safe against removal of RULE during iteration.
 * NOTE(review): the final "(RULE) = (NEXT)" clause was reconstructed — the
 * extraction dropped it; confirm against upstream. */
#define FOR_EACH_RULE_IN_LIST_SAFE(RULE, NEXT, HEAD)                    \
    for ((RULE) = (HEAD);                                               \
         (RULE) != NULL && ((NEXT) = next_rule_in_list(RULE), true);    \
         (RULE) = (NEXT))

static struct cls_rule *next_rule_in_list__(struct cls_rule *);
static struct cls_rule *next_rule_in_list(struct cls_rule *);
57 /* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
58 * 'wildcards' and 'priority'. */
60 cls_rule_init(const struct flow
*flow
, const struct flow_wildcards
*wildcards
,
61 unsigned int priority
, struct cls_rule
*rule
)
64 rule
->wc
= *wildcards
;
65 rule
->priority
= priority
;
66 cls_rule_zero_wildcarded_fields(rule
);
69 /* Converts the flow in 'flow' into an exact-match cls_rule in 'rule', with the
70 * given 'priority'. (For OpenFlow 1.0, exact-match rule are always highest
71 * priority, so 'priority' should be at least 65535.) */
73 cls_rule_init_exact(const struct flow
*flow
,
74 unsigned int priority
, struct cls_rule
*rule
)
77 rule
->flow
.skb_priority
= 0;
78 flow_wildcards_init_exact(&rule
->wc
);
79 rule
->priority
= priority
;
82 /* Initializes 'rule' as a "catch-all" rule that matches every packet, with
83 * priority 'priority'. */
85 cls_rule_init_catchall(struct cls_rule
*rule
, unsigned int priority
)
87 memset(&rule
->flow
, 0, sizeof rule
->flow
);
88 flow_wildcards_init_catchall(&rule
->wc
);
89 rule
->priority
= priority
;
92 /* For each bit or field wildcarded in 'rule', sets the corresponding bit or
93 * field in 'flow' to all-0-bits. It is important to maintain this invariant
94 * in a clr_rule that might be inserted into a classifier.
96 * It is never necessary to call this function directly for a cls_rule that is
97 * initialized or modified only by cls_rule_*() functions. It is useful to
98 * restore the invariant in a cls_rule whose 'wc' member is modified by hand.
101 cls_rule_zero_wildcarded_fields(struct cls_rule
*rule
)
103 flow_zero_wildcards(&rule
->flow
, &rule
->wc
);
/* Makes 'rule' match register 'reg_idx' exactly against 'value'. */
void
cls_rule_set_reg(struct cls_rule *rule, unsigned int reg_idx, uint32_t value)
{
    cls_rule_set_reg_masked(rule, reg_idx, value, UINT32_MAX);
}
113 cls_rule_set_reg_masked(struct cls_rule
*rule
, unsigned int reg_idx
,
114 uint32_t value
, uint32_t mask
)
116 assert(reg_idx
< FLOW_N_REGS
);
117 flow_wildcards_set_reg_mask(&rule
->wc
, reg_idx
, mask
);
118 rule
->flow
.regs
[reg_idx
] = value
& mask
;
122 cls_rule_set_metadata(struct cls_rule
*rule
, ovs_be64 metadata
)
124 cls_rule_set_metadata_masked(rule
, metadata
, htonll(UINT64_MAX
));
128 cls_rule_set_metadata_masked(struct cls_rule
*rule
, ovs_be64 metadata
,
131 rule
->wc
.metadata_mask
= mask
;
132 rule
->flow
.metadata
= metadata
& mask
;
136 cls_rule_set_tun_id(struct cls_rule
*rule
, ovs_be64 tun_id
)
138 cls_rule_set_tun_id_masked(rule
, tun_id
, htonll(UINT64_MAX
));
142 cls_rule_set_tun_id_masked(struct cls_rule
*rule
,
143 ovs_be64 tun_id
, ovs_be64 mask
)
145 rule
->wc
.tun_id_mask
= mask
;
146 rule
->flow
.tun_id
= tun_id
& mask
;
150 cls_rule_set_in_port(struct cls_rule
*rule
, uint16_t ofp_port
)
152 rule
->wc
.wildcards
&= ~FWW_IN_PORT
;
153 rule
->flow
.in_port
= ofp_port
;
157 cls_rule_set_dl_type(struct cls_rule
*rule
, ovs_be16 dl_type
)
159 rule
->wc
.wildcards
&= ~FWW_DL_TYPE
;
160 rule
->flow
.dl_type
= dl_type
;
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6          /* Fallback for standalone compilation. */
#endif
/* Copies the Ethernet address 'value_src' into 'value_dst' and sets
 * 'mask_dst' to all 1s, so that the address must match exactly.
 * (The previous comment had src/dst reversed: nothing here modifies
 * 'value_src'; it is read-only.) */
static void
cls_rule_set_eth(const uint8_t value_src[ETH_ADDR_LEN],
                 uint8_t value_dst[ETH_ADDR_LEN],
                 uint8_t mask_dst[ETH_ADDR_LEN])
{
    memcpy(value_dst, value_src, ETH_ADDR_LEN);
    memset(mask_dst, 0xff, ETH_ADDR_LEN);
}
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6          /* Fallback for standalone compilation. */
#endif
/* Sets each byte of 'value_dst' to the corresponding byte of 'value_src'
 * ANDed with 'mask_src', and copies 'mask_src' into 'mask_dst'.
 * (The previous comment claimed this "modifies 'value_src'"; the source
 * arrays are read-only, only the *_dst arrays are written.) */
static void
cls_rule_set_eth_masked(const uint8_t value_src[ETH_ADDR_LEN],
                        const uint8_t mask_src[ETH_ADDR_LEN],
                        uint8_t value_dst[ETH_ADDR_LEN],
                        uint8_t mask_dst[ETH_ADDR_LEN])
{
    size_t i;

    for (i = 0; i < ETH_ADDR_LEN; i++) {
        value_dst[i] = value_src[i] & mask_src[i];
        mask_dst[i] = mask_src[i];
    }
}
191 /* Modifies 'rule' so that the source Ethernet address
192 * must match 'dl_src' exactly. */
194 cls_rule_set_dl_src(struct cls_rule
*rule
, const uint8_t dl_src
[ETH_ADDR_LEN
])
196 cls_rule_set_eth(dl_src
, rule
->flow
.dl_src
, rule
->wc
.dl_src_mask
);
199 /* Modifies 'rule' so that the source Ethernet address
200 * must match 'dl_src' after each byte is ANDed with
201 * the appropriate byte in 'mask'. */
203 cls_rule_set_dl_src_masked(struct cls_rule
*rule
,
204 const uint8_t dl_src
[ETH_ADDR_LEN
],
205 const uint8_t mask
[ETH_ADDR_LEN
])
207 cls_rule_set_eth_masked(dl_src
, mask
,
208 rule
->flow
.dl_src
, rule
->wc
.dl_src_mask
);
211 /* Modifies 'rule' so that the destination Ethernet address
212 * must match 'dl_dst' exactly. */
214 cls_rule_set_dl_dst(struct cls_rule
*rule
, const uint8_t dl_dst
[ETH_ADDR_LEN
])
216 cls_rule_set_eth(dl_dst
, rule
->flow
.dl_dst
, rule
->wc
.dl_dst_mask
);
219 /* Modifies 'rule' so that the destination Ethernet address
220 * must match 'dl_src' after each byte is ANDed with
221 * the appropriate byte in 'mask'. */
223 cls_rule_set_dl_dst_masked(struct cls_rule
*rule
,
224 const uint8_t dl_dst
[ETH_ADDR_LEN
],
225 const uint8_t mask
[ETH_ADDR_LEN
])
227 cls_rule_set_eth_masked(dl_dst
, mask
,
228 rule
->flow
.dl_dst
, rule
->wc
.dl_dst_mask
);
232 cls_rule_set_dl_tci(struct cls_rule
*rule
, ovs_be16 tci
)
234 cls_rule_set_dl_tci_masked(rule
, tci
, htons(0xffff));
238 cls_rule_set_dl_tci_masked(struct cls_rule
*rule
, ovs_be16 tci
, ovs_be16 mask
)
240 rule
->flow
.vlan_tci
= tci
& mask
;
241 rule
->wc
.vlan_tci_mask
= mask
;
244 /* Modifies 'rule' so that the VLAN VID is wildcarded. If the PCP is already
245 * wildcarded, then 'rule' will match a packet regardless of whether it has an
246 * 802.1Q header or not. */
248 cls_rule_set_any_vid(struct cls_rule
*rule
)
250 if (rule
->wc
.vlan_tci_mask
& htons(VLAN_PCP_MASK
)) {
251 rule
->wc
.vlan_tci_mask
&= ~htons(VLAN_VID_MASK
);
252 rule
->flow
.vlan_tci
&= ~htons(VLAN_VID_MASK
);
254 cls_rule_set_dl_tci_masked(rule
, htons(0), htons(0));
258 /* Modifies 'rule' depending on 'dl_vlan':
260 * - If 'dl_vlan' is htons(OFP_VLAN_NONE), makes 'rule' match only packets
261 * without an 802.1Q header.
263 * - Otherwise, makes 'rule' match only packets with an 802.1Q header whose
264 * VID equals the low 12 bits of 'dl_vlan'.
267 cls_rule_set_dl_vlan(struct cls_rule
*rule
, ovs_be16 dl_vlan
)
269 flow_set_dl_vlan(&rule
->flow
, dl_vlan
);
270 if (dl_vlan
== htons(OFP10_VLAN_NONE
)) {
271 rule
->wc
.vlan_tci_mask
= htons(UINT16_MAX
);
273 rule
->wc
.vlan_tci_mask
|= htons(VLAN_VID_MASK
| VLAN_CFI
);
277 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
278 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
281 cls_rule_set_vlan_vid(struct cls_rule
*rule
, ovs_be16 vid
)
283 cls_rule_set_vlan_vid_masked(rule
, vid
, htons(VLAN_VID_MASK
| VLAN_CFI
));
287 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
288 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
289 * plus CFI), with the corresponding 'mask'. */
291 cls_rule_set_vlan_vid_masked(struct cls_rule
*rule
,
292 ovs_be16 vid
, ovs_be16 mask
)
294 ovs_be16 pcp_mask
= htons(VLAN_PCP_MASK
);
295 ovs_be16 vid_mask
= htons(VLAN_VID_MASK
| VLAN_CFI
);
298 flow_set_vlan_vid(&rule
->flow
, vid
& mask
);
299 rule
->wc
.vlan_tci_mask
= mask
| (rule
->wc
.vlan_tci_mask
& pcp_mask
);
302 /* Modifies 'rule' so that the VLAN PCP is wildcarded. If the VID is already
303 * wildcarded, then 'rule' will match a packet regardless of whether it has an
304 * 802.1Q header or not. */
306 cls_rule_set_any_pcp(struct cls_rule
*rule
)
308 if (rule
->wc
.vlan_tci_mask
& htons(VLAN_VID_MASK
)) {
309 rule
->wc
.vlan_tci_mask
&= ~htons(VLAN_PCP_MASK
);
310 rule
->flow
.vlan_tci
&= ~htons(VLAN_PCP_MASK
);
312 cls_rule_set_dl_tci_masked(rule
, htons(0), htons(0));
316 /* Modifies 'rule' so that it matches only packets with an 802.1Q header whose
317 * PCP equals the low 3 bits of 'dl_vlan_pcp'. */
319 cls_rule_set_dl_vlan_pcp(struct cls_rule
*rule
, uint8_t dl_vlan_pcp
)
321 flow_set_vlan_pcp(&rule
->flow
, dl_vlan_pcp
);
322 rule
->wc
.vlan_tci_mask
|= htons(VLAN_CFI
| VLAN_PCP_MASK
);
326 cls_rule_set_tp_src(struct cls_rule
*rule
, ovs_be16 tp_src
)
328 cls_rule_set_tp_src_masked(rule
, tp_src
, htons(UINT16_MAX
));
332 cls_rule_set_tp_src_masked(struct cls_rule
*rule
, ovs_be16 port
, ovs_be16 mask
)
334 rule
->flow
.tp_src
= port
& mask
;
335 rule
->wc
.tp_src_mask
= mask
;
339 cls_rule_set_tp_dst(struct cls_rule
*rule
, ovs_be16 tp_dst
)
341 cls_rule_set_tp_dst_masked(rule
, tp_dst
, htons(UINT16_MAX
));
345 cls_rule_set_tp_dst_masked(struct cls_rule
*rule
, ovs_be16 port
, ovs_be16 mask
)
347 rule
->flow
.tp_dst
= port
& mask
;
348 rule
->wc
.tp_dst_mask
= mask
;
352 cls_rule_set_nw_proto(struct cls_rule
*rule
, uint8_t nw_proto
)
354 rule
->wc
.wildcards
&= ~FWW_NW_PROTO
;
355 rule
->flow
.nw_proto
= nw_proto
;
359 cls_rule_set_nw_src(struct cls_rule
*rule
, ovs_be32 nw_src
)
361 rule
->flow
.nw_src
= nw_src
;
362 rule
->wc
.nw_src_mask
= htonl(UINT32_MAX
);
366 cls_rule_set_nw_src_masked(struct cls_rule
*rule
,
367 ovs_be32 nw_src
, ovs_be32 mask
)
369 rule
->flow
.nw_src
= nw_src
& mask
;
370 rule
->wc
.nw_src_mask
= mask
;
374 cls_rule_set_nw_dst(struct cls_rule
*rule
, ovs_be32 nw_dst
)
376 rule
->flow
.nw_dst
= nw_dst
;
377 rule
->wc
.nw_dst_mask
= htonl(UINT32_MAX
);
381 cls_rule_set_nw_dst_masked(struct cls_rule
*rule
, ovs_be32 ip
, ovs_be32 mask
)
383 rule
->flow
.nw_dst
= ip
& mask
;
384 rule
->wc
.nw_dst_mask
= mask
;
388 cls_rule_set_nw_dscp(struct cls_rule
*rule
, uint8_t nw_dscp
)
390 rule
->wc
.nw_tos_mask
|= IP_DSCP_MASK
;
391 rule
->flow
.nw_tos
&= ~IP_DSCP_MASK
;
392 rule
->flow
.nw_tos
|= nw_dscp
& IP_DSCP_MASK
;
396 cls_rule_set_nw_ecn(struct cls_rule
*rule
, uint8_t nw_ecn
)
398 rule
->wc
.nw_tos_mask
|= IP_ECN_MASK
;
399 rule
->flow
.nw_tos
&= ~IP_ECN_MASK
;
400 rule
->flow
.nw_tos
|= nw_ecn
& IP_ECN_MASK
;
404 cls_rule_set_nw_ttl(struct cls_rule
*rule
, uint8_t nw_ttl
)
406 rule
->wc
.nw_ttl_mask
= UINT8_MAX
;
407 rule
->flow
.nw_ttl
= nw_ttl
;
411 cls_rule_set_nw_frag(struct cls_rule
*rule
, uint8_t nw_frag
)
413 rule
->wc
.nw_frag_mask
|= FLOW_NW_FRAG_MASK
;
414 rule
->flow
.nw_frag
= nw_frag
;
418 cls_rule_set_nw_frag_masked(struct cls_rule
*rule
,
419 uint8_t nw_frag
, uint8_t mask
)
421 rule
->flow
.nw_frag
= nw_frag
& mask
;
422 rule
->wc
.nw_frag_mask
= mask
;
/* Makes 'rule' match the ICMP type exactly against 'icmp_type'.  The ICMP
 * type is stored in the transport source port field. */
void
cls_rule_set_icmp_type(struct cls_rule *rule, uint8_t icmp_type)
{
    cls_rule_set_tp_src(rule, htons(icmp_type));
}
/* Makes 'rule' match the ICMP code exactly against 'icmp_code'.  The ICMP
 * code is stored in the transport destination port field. */
void
cls_rule_set_icmp_code(struct cls_rule *rule, uint8_t icmp_code)
{
    cls_rule_set_tp_dst(rule, htons(icmp_code));
}
438 cls_rule_set_arp_sha(struct cls_rule
*rule
, const uint8_t sha
[ETH_ADDR_LEN
])
440 cls_rule_set_eth(sha
, rule
->flow
.arp_sha
, rule
->wc
.arp_sha_mask
);
444 cls_rule_set_arp_sha_masked(struct cls_rule
*rule
,
445 const uint8_t arp_sha
[ETH_ADDR_LEN
],
446 const uint8_t mask
[ETH_ADDR_LEN
])
448 cls_rule_set_eth_masked(arp_sha
, mask
,
449 rule
->flow
.arp_sha
, rule
->wc
.arp_sha_mask
);
453 cls_rule_set_arp_tha(struct cls_rule
*rule
, const uint8_t tha
[ETH_ADDR_LEN
])
455 cls_rule_set_eth(tha
, rule
->flow
.arp_tha
, rule
->wc
.arp_tha_mask
);
459 cls_rule_set_arp_tha_masked(struct cls_rule
*rule
,
460 const uint8_t arp_tha
[ETH_ADDR_LEN
],
461 const uint8_t mask
[ETH_ADDR_LEN
])
463 cls_rule_set_eth_masked(arp_tha
, mask
,
464 rule
->flow
.arp_tha
, rule
->wc
.arp_tha_mask
);
468 cls_rule_set_ipv6_src(struct cls_rule
*rule
, const struct in6_addr
*src
)
470 rule
->flow
.ipv6_src
= *src
;
471 rule
->wc
.ipv6_src_mask
= in6addr_exact
;
475 cls_rule_set_ipv6_src_masked(struct cls_rule
*rule
, const struct in6_addr
*src
,
476 const struct in6_addr
*mask
)
478 rule
->flow
.ipv6_src
= ipv6_addr_bitand(src
, mask
);
479 rule
->wc
.ipv6_src_mask
= *mask
;
483 cls_rule_set_ipv6_dst(struct cls_rule
*rule
, const struct in6_addr
*dst
)
485 rule
->flow
.ipv6_dst
= *dst
;
486 rule
->wc
.ipv6_dst_mask
= in6addr_exact
;
490 cls_rule_set_ipv6_dst_masked(struct cls_rule
*rule
, const struct in6_addr
*dst
,
491 const struct in6_addr
*mask
)
493 rule
->flow
.ipv6_dst
= ipv6_addr_bitand(dst
, mask
);
494 rule
->wc
.ipv6_dst_mask
= *mask
;
498 cls_rule_set_ipv6_label(struct cls_rule
*rule
, ovs_be32 ipv6_label
)
500 cls_rule_set_ipv6_label_masked(rule
, ipv6_label
, htonl(UINT32_MAX
));
504 cls_rule_set_ipv6_label_masked(struct cls_rule
*rule
, ovs_be32 ipv6_label
,
507 rule
->flow
.ipv6_label
= ipv6_label
& mask
;
508 rule
->wc
.ipv6_label_mask
= mask
;
512 cls_rule_set_nd_target(struct cls_rule
*rule
, const struct in6_addr
*target
)
514 rule
->flow
.nd_target
= *target
;
515 rule
->wc
.nd_target_mask
= in6addr_exact
;
519 cls_rule_set_nd_target_masked(struct cls_rule
*rule
,
520 const struct in6_addr
*target
,
521 const struct in6_addr
*mask
)
523 rule
->flow
.nd_target
= ipv6_addr_bitand(target
, mask
);
524 rule
->wc
.nd_target_mask
= *mask
;
527 /* Returns true if 'a' and 'b' have the same priority, wildcard the same
528 * fields, and have the same values for fixed fields, otherwise false. */
530 cls_rule_equal(const struct cls_rule
*a
, const struct cls_rule
*b
)
532 return (a
->priority
== b
->priority
533 && flow_wildcards_equal(&a
->wc
, &b
->wc
)
534 && flow_equal(&a
->flow
, &b
->flow
));
537 /* Returns a hash value for the flow, wildcards, and priority in 'rule',
538 * starting from 'basis'. */
540 cls_rule_hash(const struct cls_rule
*rule
, uint32_t basis
)
542 uint32_t h0
= flow_hash(&rule
->flow
, basis
);
543 uint32_t h1
= flow_wildcards_hash(&rule
->wc
, h0
);
544 return hash_int(rule
->priority
, h1
);
/* Appends "NAME=ETH[/MASK]," to 's', or nothing if 'mask' is all zeros
 * (i.e. the field is fully wildcarded). */
static void
format_eth_masked(struct ds *s, const char *name, const uint8_t eth[6],
                  const uint8_t mask[6])
{
    if (!eth_addr_is_zero(mask)) {
        ds_put_format(s, "%s=", name);
        eth_format_masked(eth, mask, s);
        /* NOTE(review): trailing comma line reconstructed; the extraction
         * dropped it — confirm against upstream. */
        ds_put_char(s, ',');
    }
}
559 format_ip_netmask(struct ds
*s
, const char *name
, ovs_be32 ip
,
563 ds_put_format(s
, "%s=", name
);
564 ip_format_masked(ip
, netmask
, s
);
/* Appends "NAME=ADDR[/NETMASK]," to 's', or nothing if 'netmask' is all
 * zeros (i.e. the field is fully wildcarded). */
static void
format_ipv6_netmask(struct ds *s, const char *name,
                    const struct in6_addr *addr,
                    const struct in6_addr *netmask)
{
    if (!ipv6_mask_is_any(netmask)) {
        ds_put_format(s, "%s=", name);
        print_ipv6_masked(s, addr, netmask);
        ds_put_char(s, ',');
    }
}
583 format_be16_masked(struct ds
*s
, const char *name
,
584 ovs_be16 value
, ovs_be16 mask
)
586 if (mask
!= htons(0)) {
587 ds_put_format(s
, "%s=", name
);
588 if (mask
== htons(UINT16_MAX
)) {
589 ds_put_format(s
, "%"PRIu16
, ntohs(value
));
591 ds_put_format(s
, "0x%"PRIx16
"/0x%"PRIx16
,
592 ntohs(value
), ntohs(mask
));
599 cls_rule_format(const struct cls_rule
*rule
, struct ds
*s
)
601 const struct flow_wildcards
*wc
= &rule
->wc
;
602 size_t start_len
= s
->length
;
603 flow_wildcards_t w
= wc
->wildcards
;
604 const struct flow
*f
= &rule
->flow
;
605 bool skip_type
= false;
606 bool skip_proto
= false;
610 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 15);
612 if (rule
->priority
!= OFP_DEFAULT_PRIORITY
) {
613 ds_put_format(s
, "priority=%d,", rule
->priority
);
616 if (!(w
& FWW_DL_TYPE
)) {
618 if (f
->dl_type
== htons(ETH_TYPE_IP
)) {
619 if (!(w
& FWW_NW_PROTO
)) {
621 if (f
->nw_proto
== IPPROTO_ICMP
) {
622 ds_put_cstr(s
, "icmp,");
623 } else if (f
->nw_proto
== IPPROTO_TCP
) {
624 ds_put_cstr(s
, "tcp,");
625 } else if (f
->nw_proto
== IPPROTO_UDP
) {
626 ds_put_cstr(s
, "udp,");
628 ds_put_cstr(s
, "ip,");
632 ds_put_cstr(s
, "ip,");
634 } else if (f
->dl_type
== htons(ETH_TYPE_IPV6
)) {
635 if (!(w
& FWW_NW_PROTO
)) {
637 if (f
->nw_proto
== IPPROTO_ICMPV6
) {
638 ds_put_cstr(s
, "icmp6,");
639 } else if (f
->nw_proto
== IPPROTO_TCP
) {
640 ds_put_cstr(s
, "tcp6,");
641 } else if (f
->nw_proto
== IPPROTO_UDP
) {
642 ds_put_cstr(s
, "udp6,");
644 ds_put_cstr(s
, "ipv6,");
648 ds_put_cstr(s
, "ipv6,");
650 } else if (f
->dl_type
== htons(ETH_TYPE_ARP
)) {
651 ds_put_cstr(s
, "arp,");
656 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
657 switch (wc
->reg_masks
[i
]) {
661 ds_put_format(s
, "reg%d=0x%"PRIx32
",", i
, f
->regs
[i
]);
664 ds_put_format(s
, "reg%d=0x%"PRIx32
"/0x%"PRIx32
",",
665 i
, f
->regs
[i
], wc
->reg_masks
[i
]);
669 switch (wc
->tun_id_mask
) {
672 case CONSTANT_HTONLL(UINT64_MAX
):
673 ds_put_format(s
, "tun_id=%#"PRIx64
",", ntohll(f
->tun_id
));
676 ds_put_format(s
, "tun_id=%#"PRIx64
"/%#"PRIx64
",",
677 ntohll(f
->tun_id
), ntohll(wc
->tun_id_mask
));
680 switch (wc
->metadata_mask
) {
683 case CONSTANT_HTONLL(UINT64_MAX
):
684 ds_put_format(s
, "metadata=%#"PRIx64
",", ntohll(f
->metadata
));
687 ds_put_format(s
, "metadata=%#"PRIx64
"/%#"PRIx64
",",
688 ntohll(f
->metadata
), ntohll(wc
->metadata_mask
));
691 if (!(w
& FWW_IN_PORT
)) {
692 ds_put_format(s
, "in_port=%"PRIu16
",", f
->in_port
);
694 if (wc
->vlan_tci_mask
) {
695 ovs_be16 vid_mask
= wc
->vlan_tci_mask
& htons(VLAN_VID_MASK
);
696 ovs_be16 pcp_mask
= wc
->vlan_tci_mask
& htons(VLAN_PCP_MASK
);
697 ovs_be16 cfi
= wc
->vlan_tci_mask
& htons(VLAN_CFI
);
699 if (cfi
&& f
->vlan_tci
& htons(VLAN_CFI
)
700 && (!vid_mask
|| vid_mask
== htons(VLAN_VID_MASK
))
701 && (!pcp_mask
|| pcp_mask
== htons(VLAN_PCP_MASK
))
702 && (vid_mask
|| pcp_mask
)) {
704 ds_put_format(s
, "dl_vlan=%"PRIu16
",",
705 vlan_tci_to_vid(f
->vlan_tci
));
708 ds_put_format(s
, "dl_vlan_pcp=%d,",
709 vlan_tci_to_pcp(f
->vlan_tci
));
711 } else if (wc
->vlan_tci_mask
== htons(0xffff)) {
712 ds_put_format(s
, "vlan_tci=0x%04"PRIx16
",", ntohs(f
->vlan_tci
));
714 ds_put_format(s
, "vlan_tci=0x%04"PRIx16
"/0x%04"PRIx16
",",
715 ntohs(f
->vlan_tci
), ntohs(wc
->vlan_tci_mask
));
718 format_eth_masked(s
, "dl_src", f
->dl_src
, wc
->dl_src_mask
);
719 format_eth_masked(s
, "dl_dst", f
->dl_dst
, wc
->dl_dst_mask
);
720 if (!skip_type
&& !(w
& FWW_DL_TYPE
)) {
721 ds_put_format(s
, "dl_type=0x%04"PRIx16
",", ntohs(f
->dl_type
));
723 if (f
->dl_type
== htons(ETH_TYPE_IPV6
)) {
724 format_ipv6_netmask(s
, "ipv6_src", &f
->ipv6_src
, &wc
->ipv6_src_mask
);
725 format_ipv6_netmask(s
, "ipv6_dst", &f
->ipv6_dst
, &wc
->ipv6_dst_mask
);
726 if (wc
->ipv6_label_mask
) {
727 if (wc
->ipv6_label_mask
== htonl(UINT32_MAX
)) {
728 ds_put_format(s
, "ipv6_label=0x%05"PRIx32
",",
729 ntohl(f
->ipv6_label
));
731 ds_put_format(s
, "ipv6_label=0x%05"PRIx32
"/0x%05"PRIx32
",",
732 ntohl(f
->ipv6_label
),
733 ntohl(wc
->ipv6_label_mask
));
737 format_ip_netmask(s
, "nw_src", f
->nw_src
, wc
->nw_src_mask
);
738 format_ip_netmask(s
, "nw_dst", f
->nw_dst
, wc
->nw_dst_mask
);
740 if (!skip_proto
&& !(w
& FWW_NW_PROTO
)) {
741 if (f
->dl_type
== htons(ETH_TYPE_ARP
)) {
742 ds_put_format(s
, "arp_op=%"PRIu8
",", f
->nw_proto
);
744 ds_put_format(s
, "nw_proto=%"PRIu8
",", f
->nw_proto
);
747 if (f
->dl_type
== htons(ETH_TYPE_ARP
)) {
748 format_eth_masked(s
, "arp_sha", f
->arp_sha
, wc
->arp_sha_mask
);
749 format_eth_masked(s
, "arp_tha", f
->arp_tha
, wc
->arp_tha_mask
);
751 if (wc
->nw_tos_mask
& IP_DSCP_MASK
) {
752 ds_put_format(s
, "nw_tos=%"PRIu8
",", f
->nw_tos
& IP_DSCP_MASK
);
754 if (wc
->nw_tos_mask
& IP_ECN_MASK
) {
755 ds_put_format(s
, "nw_ecn=%"PRIu8
",", f
->nw_tos
& IP_ECN_MASK
);
757 if (wc
->nw_ttl_mask
) {
758 ds_put_format(s
, "nw_ttl=%"PRIu8
",", f
->nw_ttl
);
760 switch (wc
->nw_frag_mask
) {
761 case FLOW_NW_FRAG_ANY
| FLOW_NW_FRAG_LATER
:
762 ds_put_format(s
, "nw_frag=%s,",
763 f
->nw_frag
& FLOW_NW_FRAG_ANY
764 ? (f
->nw_frag
& FLOW_NW_FRAG_LATER
? "later" : "first")
765 : (f
->nw_frag
& FLOW_NW_FRAG_LATER
? "<error>" : "no"));
768 case FLOW_NW_FRAG_ANY
:
769 ds_put_format(s
, "nw_frag=%s,",
770 f
->nw_frag
& FLOW_NW_FRAG_ANY
? "yes" : "no");
773 case FLOW_NW_FRAG_LATER
:
774 ds_put_format(s
, "nw_frag=%s,",
775 f
->nw_frag
& FLOW_NW_FRAG_LATER
? "later" : "not_later");
778 if (f
->nw_proto
== IPPROTO_ICMP
) {
779 format_be16_masked(s
, "icmp_type", f
->tp_src
, wc
->tp_src_mask
);
780 format_be16_masked(s
, "icmp_code", f
->tp_dst
, wc
->tp_dst_mask
);
781 } else if (f
->nw_proto
== IPPROTO_ICMPV6
) {
782 format_be16_masked(s
, "icmp_type", f
->tp_src
, wc
->tp_src_mask
);
783 format_be16_masked(s
, "icmp_code", f
->tp_dst
, wc
->tp_dst_mask
);
784 format_ipv6_netmask(s
, "nd_target", &f
->nd_target
,
785 &wc
->nd_target_mask
);
786 format_eth_masked(s
, "nd_sll", f
->arp_sha
, wc
->arp_sha_mask
);
787 format_eth_masked(s
, "nd_tll", f
->arp_tha
, wc
->arp_tha_mask
);
789 format_be16_masked(s
, "tp_src", f
->tp_src
, wc
->tp_src_mask
);
790 format_be16_masked(s
, "tp_dst", f
->tp_dst
, wc
->tp_dst_mask
);
793 if (s
->length
> start_len
&& ds_last(s
) == ',') {
798 /* Converts 'rule' to a string and returns the string. The caller must free
799 * the string (with free()). */
801 cls_rule_to_string(const struct cls_rule
*rule
)
803 struct ds s
= DS_EMPTY_INITIALIZER
;
804 cls_rule_format(rule
, &s
);
805 return ds_steal_cstr(&s
);
/* Prints a human-readable representation of 'rule' to stdout. */
void
cls_rule_print(const struct cls_rule *rule)
{
    char *s = cls_rule_to_string(rule);
    /* NOTE(review): the puts/free lines were reconstructed; the extraction
     * dropped them — confirm against upstream. */
    puts(s);
    free(s);
}
816 /* Initializes 'cls' as a classifier that initially contains no classification
819 classifier_init(struct classifier
*cls
)
822 hmap_init(&cls
->tables
);
825 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
826 * caller's responsibility. */
828 classifier_destroy(struct classifier
*cls
)
831 struct cls_table
*table
, *next_table
;
833 HMAP_FOR_EACH_SAFE (table
, next_table
, hmap_node
, &cls
->tables
) {
834 hmap_destroy(&table
->rules
);
835 hmap_remove(&cls
->tables
, &table
->hmap_node
);
838 hmap_destroy(&cls
->tables
);
842 /* Returns true if 'cls' contains no classification rules, false otherwise. */
844 classifier_is_empty(const struct classifier
*cls
)
846 return cls
->n_rules
== 0;
849 /* Returns the number of rules in 'classifier'. */
851 classifier_count(const struct classifier
*cls
)
856 /* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
857 * must not modify or free it.
859 * If 'cls' already contains an identical rule (including wildcards, values of
860 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
861 * rule that was replaced. The caller takes ownership of the returned rule and
862 * is thus responsible for freeing it, etc., as necessary.
864 * Returns NULL if 'cls' does not contain a rule with an identical key, after
865 * inserting the new rule. In this case, no rules are displaced by the new
866 * rule, even rules that cannot have any effect because the new rule matches a
867 * superset of their flows and has higher priority. */
869 classifier_replace(struct classifier
*cls
, struct cls_rule
*rule
)
871 struct cls_rule
*old_rule
;
872 struct cls_table
*table
;
874 table
= find_table(cls
, &rule
->wc
);
876 table
= insert_table(cls
, &rule
->wc
);
879 old_rule
= insert_rule(table
, rule
);
881 table
->n_table_rules
++;
/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * 'cls' must not contain an identical rule (including wildcards, values of
 * fixed fields, and priority).  Use classifier_find_rule_exactly() to find
 * any such rule first. */
void
classifier_insert(struct classifier *cls, struct cls_rule *rule)
{
    struct cls_rule *displaced_rule = classifier_replace(cls, rule);
    assert(!displaced_rule);  /* Caller promised no duplicate exists. */
}
900 /* Removes 'rule' from 'cls'. It is the caller's responsibility to free
901 * 'rule', if this is desirable. */
903 classifier_remove(struct classifier
*cls
, struct cls_rule
*rule
)
905 struct cls_rule
*head
;
906 struct cls_table
*table
;
908 table
= find_table(cls
, &rule
->wc
);
909 head
= find_equal(table
, &rule
->flow
, rule
->hmap_node
.hash
);
911 list_remove(&rule
->list
);
912 } else if (list_is_empty(&rule
->list
)) {
913 hmap_remove(&table
->rules
, &rule
->hmap_node
);
915 struct cls_rule
*next
= CONTAINER_OF(rule
->list
.next
,
916 struct cls_rule
, list
);
918 list_remove(&rule
->list
);
919 hmap_replace(&table
->rules
, &rule
->hmap_node
, &next
->hmap_node
);
922 if (--table
->n_table_rules
== 0) {
923 destroy_table(cls
, table
);
929 /* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
930 * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
931 * of equal priority match 'flow', returns one arbitrarily. */
933 classifier_lookup(const struct classifier
*cls
, const struct flow
*flow
)
935 struct cls_table
*table
;
936 struct cls_rule
*best
;
939 HMAP_FOR_EACH (table
, hmap_node
, &cls
->tables
) {
940 struct cls_rule
*rule
= find_match(table
, flow
);
941 if (rule
&& (!best
|| rule
->priority
> best
->priority
)) {
948 /* Finds and returns a rule in 'cls' with exactly the same priority and
949 * matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
950 * contain an exact match. */
952 classifier_find_rule_exactly(const struct classifier
*cls
,
953 const struct cls_rule
*target
)
955 struct cls_rule
*head
, *rule
;
956 struct cls_table
*table
;
958 table
= find_table(cls
, &target
->wc
);
963 head
= find_equal(table
, &target
->flow
, flow_hash(&target
->flow
, 0));
964 FOR_EACH_RULE_IN_LIST (rule
, head
) {
965 if (target
->priority
>= rule
->priority
) {
966 return target
->priority
== rule
->priority
? rule
: NULL
;
972 /* Checks if 'target' would overlap any other rule in 'cls'. Two rules are
973 * considered to overlap if both rules have the same priority and a packet
974 * could match both. */
976 classifier_rule_overlaps(const struct classifier
*cls
,
977 const struct cls_rule
*target
)
979 struct cls_table
*table
;
981 HMAP_FOR_EACH (table
, hmap_node
, &cls
->tables
) {
982 struct flow_wildcards wc
;
983 struct cls_rule
*head
;
985 flow_wildcards_combine(&wc
, &target
->wc
, &table
->wc
);
986 HMAP_FOR_EACH (head
, hmap_node
, &table
->rules
) {
987 struct cls_rule
*rule
;
989 FOR_EACH_RULE_IN_LIST (rule
, head
) {
990 if (rule
->priority
== target
->priority
991 && flow_equal_except(&target
->flow
, &rule
->flow
, &wc
)) {
1001 /* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
1002 * specific than 'criteria'. That is, 'rule' matches 'criteria' and this
1003 * function returns true if, for every field:
1005 * - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
1008 * - 'criteria' wildcards the field,
1010 * Conversely, 'rule' does not match 'criteria' and this function returns false
1011 * if, for at least one field:
1013 * - 'criteria' and 'rule' specify different values for the field, or
1015 * - 'criteria' specifies a value for the field but 'rule' wildcards it.
1017 * Equivalently, the truth table for whether a field matches is:
1022 * r +---------+---------+
1023 * i wild | yes | yes |
1025 * e +---------+---------+
1026 * r exact | no |if values|
1028 * a +---------+---------+
1030 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
1031 * commands and by OpenFlow 1.0 aggregate and flow stats.
1033 * Ignores rule->priority and criteria->priority. */
1035 cls_rule_is_loose_match(const struct cls_rule
*rule
,
1036 const struct cls_rule
*criteria
)
1038 return (!flow_wildcards_has_extra(&rule
->wc
, &criteria
->wc
)
1039 && flow_equal_except(&rule
->flow
, &criteria
->flow
, &criteria
->wc
));
1045 rule_matches(const struct cls_rule
*rule
, const struct cls_rule
*target
)
1048 || flow_equal_except(&rule
->flow
, &target
->flow
, &target
->wc
));
1051 static struct cls_rule
*
1052 search_table(const struct cls_table
*table
, const struct cls_rule
*target
)
1054 if (!target
|| !flow_wildcards_has_extra(&table
->wc
, &target
->wc
)) {
1055 struct cls_rule
*rule
;
1057 HMAP_FOR_EACH (rule
, hmap_node
, &table
->rules
) {
1058 if (rule_matches(rule
, target
)) {
1066 /* Initializes 'cursor' for iterating through rules in 'cls':
1068 * - If 'target' is null, the cursor will visit every rule in 'cls'.
1070 * - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
1071 * such that cls_rule_is_loose_match(rule, target) returns true.
1073 * Ignores target->priority. */
1075 cls_cursor_init(struct cls_cursor
*cursor
, const struct classifier
*cls
,
1076 const struct cls_rule
*target
)
1079 cursor
->target
= target
;
1082 /* Returns the first matching cls_rule in 'cursor''s iteration, or a null
1083 * pointer if there are no matches. */
1085 cls_cursor_first(struct cls_cursor
*cursor
)
1087 struct cls_table
*table
;
1089 HMAP_FOR_EACH (table
, hmap_node
, &cursor
->cls
->tables
) {
1090 struct cls_rule
*rule
= search_table(table
, cursor
->target
);
1092 cursor
->table
= table
;
1100 /* Returns the next matching cls_rule in 'cursor''s iteration, or a null
1101 * pointer if there are no more matches. */
1103 cls_cursor_next(struct cls_cursor
*cursor
, struct cls_rule
*rule
)
1105 const struct cls_table
*table
;
1106 struct cls_rule
*next
;
1108 next
= next_rule_in_list__(rule
);
1109 if (next
->priority
< rule
->priority
) {
1113 /* 'next' is the head of the list, that is, the rule that is included in
1114 * the table's hmap. (This is important when the classifier contains rules
1115 * that differ only in priority.) */
1117 HMAP_FOR_EACH_CONTINUE (rule
, hmap_node
, &cursor
->table
->rules
) {
1118 if (rule_matches(rule
, cursor
->target
)) {
1123 table
= cursor
->table
;
1124 HMAP_FOR_EACH_CONTINUE (table
, hmap_node
, &cursor
->cls
->tables
) {
1125 rule
= search_table(table
, cursor
->target
);
1127 cursor
->table
= table
;
1135 static struct cls_table
*
1136 find_table(const struct classifier
*cls
, const struct flow_wildcards
*wc
)
1138 struct cls_table
*table
;
1140 HMAP_FOR_EACH_IN_BUCKET (table
, hmap_node
, flow_wildcards_hash(wc
, 0),
1142 if (flow_wildcards_equal(wc
, &table
->wc
)) {
1149 static struct cls_table
*
1150 insert_table(struct classifier
*cls
, const struct flow_wildcards
*wc
)
1152 struct cls_table
*table
;
1154 table
= xzalloc(sizeof *table
);
1155 hmap_init(&table
->rules
);
1157 table
->is_catchall
= flow_wildcards_is_catchall(&table
->wc
);
1158 hmap_insert(&cls
->tables
, &table
->hmap_node
, flow_wildcards_hash(wc
, 0));
1164 destroy_table(struct classifier
*cls
, struct cls_table
*table
)
1166 hmap_remove(&cls
->tables
, &table
->hmap_node
);
1167 hmap_destroy(&table
->rules
);
1171 static struct cls_rule
*
1172 find_match(const struct cls_table
*table
, const struct flow
*flow
)
1174 struct cls_rule
*rule
;
1176 if (table
->is_catchall
) {
1177 HMAP_FOR_EACH (rule
, hmap_node
, &table
->rules
) {
1184 flow_zero_wildcards(&f
, &table
->wc
);
1185 HMAP_FOR_EACH_WITH_HASH (rule
, hmap_node
, flow_hash(&f
, 0),
1187 if (flow_equal(&f
, &rule
->flow
)) {
1196 static struct cls_rule
*
1197 find_equal(struct cls_table
*table
, const struct flow
*flow
, uint32_t hash
)
1199 struct cls_rule
*head
;
1201 HMAP_FOR_EACH_WITH_HASH (head
, hmap_node
, hash
, &table
->rules
) {
1202 if (flow_equal(&head
->flow
, flow
)) {
1209 static struct cls_rule
*
1210 insert_rule(struct cls_table
*table
, struct cls_rule
*new)
1212 struct cls_rule
*head
;
1214 new->hmap_node
.hash
= flow_hash(&new->flow
, 0);
1216 head
= find_equal(table
, &new->flow
, new->hmap_node
.hash
);
1218 hmap_insert(&table
->rules
, &new->hmap_node
, new->hmap_node
.hash
);
1219 list_init(&new->list
);
1222 /* Scan the list for the insertion point that will keep the list in
1223 * order of decreasing priority. */
1224 struct cls_rule
*rule
;
1225 FOR_EACH_RULE_IN_LIST (rule
, head
) {
1226 if (new->priority
>= rule
->priority
) {
1228 /* 'new' is the new highest-priority flow in the list. */
1229 hmap_replace(&table
->rules
,
1230 &rule
->hmap_node
, &new->hmap_node
);
1233 if (new->priority
== rule
->priority
) {
1234 list_replace(&new->list
, &rule
->list
);
1237 list_insert(&rule
->list
, &new->list
);
1243 /* Insert 'new' at the end of the list. */
1244 list_push_back(&head
->list
, &new->list
);
1249 static struct cls_rule
*
1250 next_rule_in_list__(struct cls_rule
*rule
)
1252 struct cls_rule
*next
= OBJECT_CONTAINING(rule
->list
.next
, next
, list
);
1256 static struct cls_rule
*
1257 next_rule_in_list(struct cls_rule
*rule
)
1259 struct cls_rule
*next
= next_rule_in_list__(rule
);
1260 return next
->priority
< rule
->priority
? next
: NULL
;
/* Returns true if IPv6 addresses 'a' and 'b' are equal in every bit position
 * where 'mask' has a 1-bit, false otherwise. */
static bool
ipv6_equal_except(const struct in6_addr *a, const struct in6_addr *b,
                  const struct in6_addr *mask)
{
    int i;

#ifdef s6_addr32
    /* Compare four 32-bit words at a time where the platform exposes
     * s6_addr32. */
    for (i = 0; i < 4; i++) {
        if ((a->s6_addr32[i] ^ b->s6_addr32[i]) & mask->s6_addr32[i]) {
            return false;
        }
    }
#else
    /* Portable fallback: byte-by-byte comparison. */
    for (i = 0; i < 16; i++) {
        if ((a->s6_addr[i] ^ b->s6_addr[i]) & mask->s6_addr[i]) {
            return false;
        }
    }
#endif

    return true;
}
1288 flow_equal_except(const struct flow
*a
, const struct flow
*b
,
1289 const struct flow_wildcards
*wildcards
)
1291 const flow_wildcards_t wc
= wildcards
->wildcards
;
1294 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 15);
1296 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
1297 if ((a
->regs
[i
] ^ b
->regs
[i
]) & wildcards
->reg_masks
[i
]) {
1302 return (!((a
->tun_id
^ b
->tun_id
) & wildcards
->tun_id_mask
)
1303 && !((a
->metadata
^ b
->metadata
) & wildcards
->metadata_mask
)
1304 && !((a
->nw_src
^ b
->nw_src
) & wildcards
->nw_src_mask
)
1305 && !((a
->nw_dst
^ b
->nw_dst
) & wildcards
->nw_dst_mask
)
1306 && (wc
& FWW_IN_PORT
|| a
->in_port
== b
->in_port
)
1307 && !((a
->vlan_tci
^ b
->vlan_tci
) & wildcards
->vlan_tci_mask
)
1308 && (wc
& FWW_DL_TYPE
|| a
->dl_type
== b
->dl_type
)
1309 && !((a
->tp_src
^ b
->tp_src
) & wildcards
->tp_src_mask
)
1310 && !((a
->tp_dst
^ b
->tp_dst
) & wildcards
->tp_dst_mask
)
1311 && eth_addr_equal_except(a
->dl_src
, b
->dl_src
,
1312 wildcards
->dl_src_mask
)
1313 && eth_addr_equal_except(a
->dl_dst
, b
->dl_dst
,
1314 wildcards
->dl_dst_mask
)
1315 && (wc
& FWW_NW_PROTO
|| a
->nw_proto
== b
->nw_proto
)
1316 && !((a
->nw_ttl
^ b
->nw_ttl
) & wildcards
->nw_ttl_mask
)
1317 && !((a
->nw_tos
^ b
->nw_tos
) & wildcards
->nw_tos_mask
)
1318 && !((a
->nw_frag
^ b
->nw_frag
) & wildcards
->nw_frag_mask
)
1319 && eth_addr_equal_except(a
->arp_sha
, b
->arp_sha
,
1320 wildcards
->arp_sha_mask
)
1321 && eth_addr_equal_except(a
->arp_tha
, b
->arp_tha
,
1322 wildcards
->arp_tha_mask
)
1323 && !((a
->ipv6_label
^ b
->ipv6_label
) & wildcards
->ipv6_label_mask
)
1324 && ipv6_equal_except(&a
->ipv6_src
, &b
->ipv6_src
,
1325 &wildcards
->ipv6_src_mask
)
1326 && ipv6_equal_except(&a
->ipv6_dst
, &b
->ipv6_dst
,
1327 &wildcards
->ipv6_dst_mask
)
1328 && ipv6_equal_except(&a
->nd_target
, &b
->nd_target
,
1329 &wildcards
->nd_target_mask
));