/*
 * Copyright (c) 2016 Mellanox Technologies, Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "netdev-tc-offloads.h"
21 #include <linux/if_ether.h>
25 #include "openvswitch/hmap.h"
26 #include "openvswitch/match.h"
27 #include "openvswitch/ofpbuf.h"
28 #include "openvswitch/thread.h"
29 #include "openvswitch/types.h"
30 #include "openvswitch/util.h"
31 #include "openvswitch/vlog.h"
32 #include "netdev-linux.h"
34 #include "netlink-socket.h"
35 #include "odp-netlink.h"
38 #include "unaligned.h"
41 VLOG_DEFINE_THIS_MODULE(netdev_tc_offloads
);
43 static struct vlog_rate_limit error_rl
= VLOG_RATE_LIMIT_INIT(60, 5);
45 static struct hmap ufid_tc
= HMAP_INITIALIZER(&ufid_tc
);
46 static bool multi_mask_per_prio
= false;
47 static bool block_support
= false;
49 struct netlink_field
{
55 static struct netlink_field set_flower_map
[][3] = {
56 [OVS_KEY_ATTR_IPV4
] = {
57 { offsetof(struct ovs_key_ipv4
, ipv4_src
),
58 offsetof(struct tc_flower_key
, ipv4
.ipv4_src
),
59 MEMBER_SIZEOF(struct tc_flower_key
, ipv4
.ipv4_src
)
61 { offsetof(struct ovs_key_ipv4
, ipv4_dst
),
62 offsetof(struct tc_flower_key
, ipv4
.ipv4_dst
),
63 MEMBER_SIZEOF(struct tc_flower_key
, ipv4
.ipv4_dst
)
65 { offsetof(struct ovs_key_ipv4
, ipv4_ttl
),
66 offsetof(struct tc_flower_key
, ipv4
.rewrite_ttl
),
67 MEMBER_SIZEOF(struct tc_flower_key
, ipv4
.rewrite_ttl
)
70 [OVS_KEY_ATTR_IPV6
] = {
71 { offsetof(struct ovs_key_ipv6
, ipv6_src
),
72 offsetof(struct tc_flower_key
, ipv6
.ipv6_src
),
73 MEMBER_SIZEOF(struct tc_flower_key
, ipv6
.ipv6_src
)
75 { offsetof(struct ovs_key_ipv6
, ipv6_dst
),
76 offsetof(struct tc_flower_key
, ipv6
.ipv6_dst
),
77 MEMBER_SIZEOF(struct tc_flower_key
, ipv6
.ipv6_dst
)
80 [OVS_KEY_ATTR_ETHERNET
] = {
81 { offsetof(struct ovs_key_ethernet
, eth_src
),
82 offsetof(struct tc_flower_key
, src_mac
),
83 MEMBER_SIZEOF(struct tc_flower_key
, src_mac
)
85 { offsetof(struct ovs_key_ethernet
, eth_dst
),
86 offsetof(struct tc_flower_key
, dst_mac
),
87 MEMBER_SIZEOF(struct tc_flower_key
, dst_mac
)
90 [OVS_KEY_ATTR_ETHERTYPE
] = {
92 offsetof(struct tc_flower_key
, eth_type
),
93 MEMBER_SIZEOF(struct tc_flower_key
, eth_type
)
96 [OVS_KEY_ATTR_TCP
] = {
97 { offsetof(struct ovs_key_tcp
, tcp_src
),
98 offsetof(struct tc_flower_key
, tcp_src
),
99 MEMBER_SIZEOF(struct tc_flower_key
, tcp_src
)
101 { offsetof(struct ovs_key_tcp
, tcp_dst
),
102 offsetof(struct tc_flower_key
, tcp_dst
),
103 MEMBER_SIZEOF(struct tc_flower_key
, tcp_dst
)
106 [OVS_KEY_ATTR_UDP
] = {
107 { offsetof(struct ovs_key_udp
, udp_src
),
108 offsetof(struct tc_flower_key
, udp_src
),
109 MEMBER_SIZEOF(struct tc_flower_key
, udp_src
)
111 { offsetof(struct ovs_key_udp
, udp_dst
),
112 offsetof(struct tc_flower_key
, udp_dst
),
113 MEMBER_SIZEOF(struct tc_flower_key
, udp_dst
)
118 static struct ovs_mutex ufid_lock
= OVS_MUTEX_INITIALIZER
;
121 * struct ufid_tc_data - data entry for ufid_tc hmap.
122 * @ufid_node: Element in @ufid_tc hash table by ufid key.
123 * @tc_node: Element in @ufid_tc hash table by prio/handle/ifindex key.
124 * @ufid: ufid assigned to the flow
127 * @ifindex: netdev ifindex.
128 * @netdev: netdev associated with the tc rule
130 struct ufid_tc_data
{
131 struct hmap_node ufid_node
;
132 struct hmap_node tc_node
;
137 struct netdev
*netdev
;
140 /* Remove matching ufid entry from ufid_tc hashmap. */
142 del_ufid_tc_mapping(const ovs_u128
*ufid
)
144 size_t ufid_hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
145 struct ufid_tc_data
*data
;
147 ovs_mutex_lock(&ufid_lock
);
148 HMAP_FOR_EACH_WITH_HASH(data
, ufid_node
, ufid_hash
, &ufid_tc
) {
149 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
155 ovs_mutex_unlock(&ufid_lock
);
159 hmap_remove(&ufid_tc
, &data
->ufid_node
);
160 hmap_remove(&ufid_tc
, &data
->tc_node
);
161 netdev_close(data
->netdev
);
163 ovs_mutex_unlock(&ufid_lock
);
166 /* Add ufid entry to ufid_tc hashmap.
167 * If entry exists already it will be replaced. */
169 add_ufid_tc_mapping(const ovs_u128
*ufid
, int prio
, int handle
,
170 struct netdev
*netdev
, int ifindex
)
172 size_t ufid_hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
173 size_t tc_hash
= hash_int(hash_int(prio
, handle
), ifindex
);
174 struct ufid_tc_data
*new_data
= xzalloc(sizeof *new_data
);
176 del_ufid_tc_mapping(ufid
);
178 new_data
->ufid
= *ufid
;
179 new_data
->prio
= prio
;
180 new_data
->handle
= handle
;
181 new_data
->netdev
= netdev_ref(netdev
);
182 new_data
->ifindex
= ifindex
;
184 ovs_mutex_lock(&ufid_lock
);
185 hmap_insert(&ufid_tc
, &new_data
->ufid_node
, ufid_hash
);
186 hmap_insert(&ufid_tc
, &new_data
->tc_node
, tc_hash
);
187 ovs_mutex_unlock(&ufid_lock
);
190 /* Get ufid from ufid_tc hashmap.
192 * If netdev output param is not NULL then the function will return
193 * associated netdev on success and a refcount is taken on that netdev.
194 * The caller is then responsible to close the netdev.
196 * Returns handle if successful and fill prio and netdev for that ufid.
197 * Otherwise returns 0.
200 get_ufid_tc_mapping(const ovs_u128
*ufid
, int *prio
, struct netdev
**netdev
)
202 size_t ufid_hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
203 struct ufid_tc_data
*data
;
206 ovs_mutex_lock(&ufid_lock
);
207 HMAP_FOR_EACH_WITH_HASH(data
, ufid_node
, ufid_hash
, &ufid_tc
) {
208 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
213 *netdev
= netdev_ref(data
->netdev
);
215 handle
= data
->handle
;
219 ovs_mutex_unlock(&ufid_lock
);
224 /* Find ufid entry in ufid_tc hashmap using prio, handle and netdev.
225 * The result is saved in ufid.
227 * Returns true on success.
230 find_ufid(int prio
, int handle
, struct netdev
*netdev
, ovs_u128
*ufid
)
232 int ifindex
= netdev_get_ifindex(netdev
);
233 struct ufid_tc_data
*data
;
234 size_t tc_hash
= hash_int(hash_int(prio
, handle
), ifindex
);
236 ovs_mutex_lock(&ufid_lock
);
237 HMAP_FOR_EACH_WITH_HASH(data
, tc_node
, tc_hash
, &ufid_tc
) {
238 if (data
->prio
== prio
&& data
->handle
== handle
239 && data
->ifindex
== ifindex
) {
244 ovs_mutex_unlock(&ufid_lock
);
246 return (data
!= NULL
);
249 struct prio_map_data
{
250 struct hmap_node node
;
251 struct tc_flower_key mask
;
256 /* Get free prio for tc flower
257 * If prio is already allocated for mask/eth_type combination then return it.
258 * If not assign new prio.
260 * Return prio on success or 0 if we are out of prios.
263 get_prio_for_tc_flower(struct tc_flower
*flower
)
265 static struct hmap prios
= HMAP_INITIALIZER(&prios
);
266 static struct ovs_mutex prios_lock
= OVS_MUTEX_INITIALIZER
;
267 static uint16_t last_prio
= 0;
268 size_t key_len
= sizeof(struct tc_flower_key
);
269 size_t hash
= hash_int((OVS_FORCE
uint32_t) flower
->key
.eth_type
, 0);
270 struct prio_map_data
*data
;
271 struct prio_map_data
*new_data
;
273 if (!multi_mask_per_prio
) {
274 hash
= hash_bytes(&flower
->mask
, key_len
, hash
);
277 /* We can use the same prio for same mask/eth combination but must have
278 * different prio if not. Flower classifier will reject same prio for
279 * different mask combination unless multi mask per prio is supported. */
280 ovs_mutex_lock(&prios_lock
);
281 HMAP_FOR_EACH_WITH_HASH(data
, node
, hash
, &prios
) {
282 if ((multi_mask_per_prio
283 || !memcmp(&flower
->mask
, &data
->mask
, key_len
))
284 && data
->protocol
== flower
->key
.eth_type
) {
285 ovs_mutex_unlock(&prios_lock
);
290 if (last_prio
== UINT16_MAX
) {
291 /* last_prio can overflow if there will be many different kinds of
292 * flows which shouldn't happen organically. */
293 ovs_mutex_unlock(&prios_lock
);
297 new_data
= xzalloc(sizeof *new_data
);
298 memcpy(&new_data
->mask
, &flower
->mask
, key_len
);
299 new_data
->prio
= ++last_prio
;
300 new_data
->protocol
= flower
->key
.eth_type
;
301 hmap_insert(&prios
, &new_data
->node
, hash
);
302 ovs_mutex_unlock(&prios_lock
);
304 return new_data
->prio
;
308 get_block_id_from_netdev(struct netdev
*netdev
)
311 return netdev_get_block_id(netdev
);
318 netdev_tc_flow_flush(struct netdev
*netdev
)
320 int ifindex
= netdev_get_ifindex(netdev
);
321 uint32_t block_id
= 0;
324 VLOG_ERR_RL(&error_rl
, "flow_flush: failed to get ifindex for %s: %s",
325 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
329 block_id
= get_block_id_from_netdev(netdev
);
331 return tc_flush(ifindex
, block_id
);
335 netdev_tc_flow_dump_create(struct netdev
*netdev
,
336 struct netdev_flow_dump
**dump_out
)
338 struct netdev_flow_dump
*dump
;
339 uint32_t block_id
= 0;
342 ifindex
= netdev_get_ifindex(netdev
);
344 VLOG_ERR_RL(&error_rl
, "dump_create: failed to get ifindex for %s: %s",
345 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
349 block_id
= get_block_id_from_netdev(netdev
);
350 dump
= xzalloc(sizeof *dump
);
351 dump
->nl_dump
= xzalloc(sizeof *dump
->nl_dump
);
352 dump
->netdev
= netdev_ref(netdev
);
353 tc_dump_flower_start(ifindex
, dump
->nl_dump
, block_id
);
361 netdev_tc_flow_dump_destroy(struct netdev_flow_dump
*dump
)
363 nl_dump_done(dump
->nl_dump
);
364 netdev_close(dump
->netdev
);
371 parse_flower_rewrite_to_netlink_action(struct ofpbuf
*buf
,
372 struct tc_flower
*flower
)
374 char *mask
= (char *) &flower
->rewrite
.mask
;
375 char *data
= (char *) &flower
->rewrite
.key
;
377 for (int type
= 0; type
< ARRAY_SIZE(set_flower_map
); type
++) {
380 int len
= ovs_flow_key_attr_lens
[type
].len
;
386 for (int j
= 0; j
< ARRAY_SIZE(set_flower_map
[type
]); j
++) {
387 struct netlink_field
*f
= &set_flower_map
[type
][j
];
393 if (!is_all_zeros(mask
+ f
->flower_offset
, f
->size
)) {
395 nested
= nl_msg_start_nested(buf
,
396 OVS_ACTION_ATTR_SET_MASKED
);
397 put
= nl_msg_put_unspec_zero(buf
, type
, len
* 2);
400 memcpy(put
+ f
->offset
, data
+ f
->flower_offset
, f
->size
);
401 memcpy(put
+ len
+ f
->offset
,
402 mask
+ f
->flower_offset
, f
->size
);
407 nl_msg_end_nested(buf
, nested
);
413 parse_tc_flower_to_match(struct tc_flower
*flower
,
415 struct nlattr
**actions
,
416 struct dpif_flow_stats
*stats
,
417 struct dpif_flow_attrs
*attrs
,
421 struct tc_flower_key
*key
= &flower
->key
;
422 struct tc_flower_key
*mask
= &flower
->mask
;
423 odp_port_t outport
= 0;
424 struct tc_action
*action
;
429 match_init_catchall(match
);
430 match_set_dl_src_masked(match
, key
->src_mac
, mask
->src_mac
);
431 match_set_dl_dst_masked(match
, key
->dst_mac
, mask
->dst_mac
);
433 if (eth_type_vlan(key
->eth_type
)) {
434 match
->flow
.vlans
[0].tpid
= key
->eth_type
;
435 match
->wc
.masks
.vlans
[0].tpid
= OVS_BE16_MAX
;
436 match_set_dl_vlan(match
, htons(key
->vlan_id
[0]), 0);
437 match_set_dl_vlan_pcp(match
, key
->vlan_prio
[0], 0);
439 if (eth_type_vlan(key
->encap_eth_type
[0])) {
440 match_set_dl_vlan(match
, htons(key
->vlan_id
[1]), 1);
441 match_set_dl_vlan_pcp(match
, key
->vlan_prio
[1], 1);
442 match_set_dl_type(match
, key
->encap_eth_type
[1]);
443 match
->flow
.vlans
[1].tpid
= key
->encap_eth_type
[0];
444 match
->wc
.masks
.vlans
[1].tpid
= OVS_BE16_MAX
;
446 match_set_dl_type(match
, key
->encap_eth_type
[0]);
448 flow_fix_vlan_tpid(&match
->flow
);
449 } else if (eth_type_mpls(key
->eth_type
)) {
450 match
->flow
.mpls_lse
[0] = key
->mpls_lse
& mask
->mpls_lse
;
451 match
->wc
.masks
.mpls_lse
[0] = mask
->mpls_lse
;
452 match_set_dl_type(match
, key
->encap_eth_type
[0]);
454 match_set_dl_type(match
, key
->eth_type
);
457 if (is_ip_any(&match
->flow
)) {
459 match_set_nw_proto(match
, key
->ip_proto
);
462 match_set_nw_tos_masked(match
, key
->ip_tos
, mask
->ip_tos
);
463 match_set_nw_ttl_masked(match
, key
->ip_ttl
, mask
->ip_ttl
);
467 uint8_t flags_mask
= 0;
469 if (mask
->flags
& TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT
) {
470 if (key
->flags
& TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT
) {
471 flags
|= FLOW_NW_FRAG_ANY
;
473 flags_mask
|= FLOW_NW_FRAG_ANY
;
476 if (mask
->flags
& TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST
) {
477 if (!(key
->flags
& TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST
)) {
478 flags
|= FLOW_NW_FRAG_LATER
;
480 flags_mask
|= FLOW_NW_FRAG_LATER
;
483 match_set_nw_frag_masked(match
, flags
, flags_mask
);
486 match_set_nw_src_masked(match
, key
->ipv4
.ipv4_src
, mask
->ipv4
.ipv4_src
);
487 match_set_nw_dst_masked(match
, key
->ipv4
.ipv4_dst
, mask
->ipv4
.ipv4_dst
);
489 match_set_ipv6_src_masked(match
,
490 &key
->ipv6
.ipv6_src
, &mask
->ipv6
.ipv6_src
);
491 match_set_ipv6_dst_masked(match
,
492 &key
->ipv6
.ipv6_dst
, &mask
->ipv6
.ipv6_dst
);
494 if (key
->ip_proto
== IPPROTO_TCP
) {
495 match_set_tp_dst_masked(match
, key
->tcp_dst
, mask
->tcp_dst
);
496 match_set_tp_src_masked(match
, key
->tcp_src
, mask
->tcp_src
);
497 match_set_tcp_flags_masked(match
, key
->tcp_flags
, mask
->tcp_flags
);
498 } else if (key
->ip_proto
== IPPROTO_UDP
) {
499 match_set_tp_dst_masked(match
, key
->udp_dst
, mask
->udp_dst
);
500 match_set_tp_src_masked(match
, key
->udp_src
, mask
->udp_src
);
501 } else if (key
->ip_proto
== IPPROTO_SCTP
) {
502 match_set_tp_dst_masked(match
, key
->sctp_dst
, mask
->sctp_dst
);
503 match_set_tp_src_masked(match
, key
->sctp_src
, mask
->sctp_src
);
507 if (flower
->tunnel
) {
508 match_set_tun_id(match
, flower
->key
.tunnel
.id
);
509 if (flower
->key
.tunnel
.ipv4
.ipv4_dst
) {
510 match_set_tun_src(match
, flower
->key
.tunnel
.ipv4
.ipv4_src
);
511 match_set_tun_dst(match
, flower
->key
.tunnel
.ipv4
.ipv4_dst
);
512 } else if (!is_all_zeros(&flower
->key
.tunnel
.ipv6
.ipv6_dst
,
513 sizeof flower
->key
.tunnel
.ipv6
.ipv6_dst
)) {
514 match_set_tun_ipv6_src(match
, &flower
->key
.tunnel
.ipv6
.ipv6_src
);
515 match_set_tun_ipv6_dst(match
, &flower
->key
.tunnel
.ipv6
.ipv6_dst
);
517 if (flower
->key
.tunnel
.tos
) {
518 match_set_tun_tos_masked(match
, flower
->key
.tunnel
.tos
,
519 flower
->mask
.tunnel
.tos
);
521 if (flower
->key
.tunnel
.ttl
) {
522 match_set_tun_ttl_masked(match
, flower
->key
.tunnel
.ttl
,
523 flower
->mask
.tunnel
.ttl
);
525 if (flower
->key
.tunnel
.tp_dst
) {
526 match_set_tun_tp_dst(match
, flower
->key
.tunnel
.tp_dst
);
530 act_off
= nl_msg_start_nested(buf
, OVS_FLOW_ATTR_ACTIONS
);
532 action
= flower
->actions
;
533 for (i
= 0; i
< flower
->action_count
; i
++, action
++) {
534 switch (action
->type
) {
535 case TC_ACT_VLAN_POP
: {
536 nl_msg_put_flag(buf
, OVS_ACTION_ATTR_POP_VLAN
);
539 case TC_ACT_VLAN_PUSH
: {
540 struct ovs_action_push_vlan
*push
;
542 push
= nl_msg_put_unspec_zero(buf
, OVS_ACTION_ATTR_PUSH_VLAN
,
544 push
->vlan_tpid
= action
->vlan
.vlan_push_tpid
;
545 push
->vlan_tci
= htons(action
->vlan
.vlan_push_id
546 | (action
->vlan
.vlan_push_prio
<< 13)
551 parse_flower_rewrite_to_netlink_action(buf
, flower
);
555 size_t set_offset
= nl_msg_start_nested(buf
, OVS_ACTION_ATTR_SET
);
556 size_t tunnel_offset
=
557 nl_msg_start_nested(buf
, OVS_KEY_ATTR_TUNNEL
);
559 nl_msg_put_be64(buf
, OVS_TUNNEL_KEY_ATTR_ID
, action
->encap
.id
);
560 if (action
->encap
.ipv4
.ipv4_src
) {
561 nl_msg_put_be32(buf
, OVS_TUNNEL_KEY_ATTR_IPV4_SRC
,
562 action
->encap
.ipv4
.ipv4_src
);
564 if (action
->encap
.ipv4
.ipv4_dst
) {
565 nl_msg_put_be32(buf
, OVS_TUNNEL_KEY_ATTR_IPV4_DST
,
566 action
->encap
.ipv4
.ipv4_dst
);
568 if (!is_all_zeros(&action
->encap
.ipv6
.ipv6_src
,
569 sizeof action
->encap
.ipv6
.ipv6_src
)) {
570 nl_msg_put_in6_addr(buf
, OVS_TUNNEL_KEY_ATTR_IPV6_SRC
,
571 &action
->encap
.ipv6
.ipv6_src
);
573 if (!is_all_zeros(&action
->encap
.ipv6
.ipv6_dst
,
574 sizeof action
->encap
.ipv6
.ipv6_dst
)) {
575 nl_msg_put_in6_addr(buf
, OVS_TUNNEL_KEY_ATTR_IPV6_DST
,
576 &action
->encap
.ipv6
.ipv6_dst
);
578 if (action
->encap
.tos
) {
579 nl_msg_put_u8(buf
, OVS_TUNNEL_KEY_ATTR_TOS
,
582 if (action
->encap
.ttl
) {
583 nl_msg_put_u8(buf
, OVS_TUNNEL_KEY_ATTR_TTL
,
586 nl_msg_put_be16(buf
, OVS_TUNNEL_KEY_ATTR_TP_DST
,
587 action
->encap
.tp_dst
);
589 nl_msg_end_nested(buf
, tunnel_offset
);
590 nl_msg_end_nested(buf
, set_offset
);
593 case TC_ACT_OUTPUT
: {
594 if (action
->ifindex_out
) {
595 outport
= netdev_ifindex_to_odp_port(action
->ifindex_out
);
600 nl_msg_put_u32(buf
, OVS_ACTION_ATTR_OUTPUT
, odp_to_u32(outport
));
606 nl_msg_end_nested(buf
, act_off
);
608 *actions
= ofpbuf_at_assert(buf
, act_off
, sizeof(struct nlattr
));
611 memset(stats
, 0, sizeof *stats
);
612 stats
->n_packets
= get_32aligned_u64(&flower
->stats
.n_packets
);
613 stats
->n_bytes
= get_32aligned_u64(&flower
->stats
.n_bytes
);
614 stats
->used
= flower
->lastused
;
617 attrs
->offloaded
= (flower
->offloaded_state
== TC_OFFLOADED_STATE_IN_HW
)
618 || (flower
->offloaded_state
== TC_OFFLOADED_STATE_UNDEFINED
);
619 attrs
->dp_layer
= "tc";
625 netdev_tc_flow_dump_next(struct netdev_flow_dump
*dump
,
627 struct nlattr
**actions
,
628 struct dpif_flow_stats
*stats
,
629 struct dpif_flow_attrs
*attrs
,
631 struct ofpbuf
*rbuffer
,
632 struct ofpbuf
*wbuffer
)
634 struct ofpbuf nl_flow
;
636 while (nl_dump_next(dump
->nl_dump
, &nl_flow
, rbuffer
)) {
637 struct tc_flower flower
;
638 struct netdev
*netdev
= dump
->netdev
;
640 if (parse_netlink_to_tc_flower(&nl_flow
, &flower
)) {
644 if (parse_tc_flower_to_match(&flower
, match
, actions
, stats
, attrs
,
649 if (flower
.act_cookie
.len
) {
650 *ufid
= *((ovs_u128
*) flower
.act_cookie
.data
);
651 } else if (!find_ufid(flower
.prio
, flower
.handle
, netdev
, ufid
)) {
655 match
->wc
.masks
.in_port
.odp_port
= u32_to_odp(UINT32_MAX
);
656 match
->flow
.in_port
.odp_port
= dump
->port
;
665 parse_put_flow_set_masked_action(struct tc_flower
*flower
,
666 struct tc_action
*action
,
667 const struct nlattr
*set
,
671 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
672 uint64_t set_stub
[1024 / 8];
673 struct ofpbuf set_buf
= OFPBUF_STUB_INITIALIZER(set_stub
);
674 char *set_data
, *set_mask
;
675 char *key
= (char *) &flower
->rewrite
.key
;
676 char *mask
= (char *) &flower
->rewrite
.mask
;
677 const struct nlattr
*attr
;
681 /* copy so we can set attr mask to 0 for used ovs key struct members */
682 attr
= ofpbuf_put(&set_buf
, set
, set_len
);
684 type
= nl_attr_type(attr
);
685 size
= nl_attr_get_size(attr
) / 2;
686 set_data
= CONST_CAST(char *, nl_attr_get(attr
));
687 set_mask
= set_data
+ size
;
689 if (type
>= ARRAY_SIZE(set_flower_map
)
690 || !set_flower_map
[type
][0].size
) {
691 VLOG_DBG_RL(&rl
, "unsupported set action type: %d", type
);
692 ofpbuf_uninit(&set_buf
);
696 for (i
= 0; i
< ARRAY_SIZE(set_flower_map
[type
]); i
++) {
697 struct netlink_field
*f
= &set_flower_map
[type
][i
];
703 /* copy masked value */
704 for (j
= 0; j
< f
->size
; j
++) {
705 char maskval
= hasmask
? set_mask
[f
->offset
+ j
] : 0xFF;
707 key
[f
->flower_offset
+ j
] = maskval
& set_data
[f
->offset
+ j
];
708 mask
[f
->flower_offset
+ j
] = maskval
;
712 /* set its mask to 0 to show it's been used. */
714 memset(set_mask
+ f
->offset
, 0, f
->size
);
718 if (!is_all_zeros(&flower
->rewrite
, sizeof flower
->rewrite
)) {
719 if (flower
->rewrite
.rewrite
== false) {
720 flower
->rewrite
.rewrite
= true;
721 action
->type
= TC_ACT_PEDIT
;
722 flower
->action_count
++;
726 if (hasmask
&& !is_all_zeros(set_mask
, size
)) {
727 VLOG_DBG_RL(&rl
, "unsupported sub attribute of set action type %d",
729 ofpbuf_uninit(&set_buf
);
733 ofpbuf_uninit(&set_buf
);
738 parse_put_flow_set_action(struct tc_flower
*flower
, struct tc_action
*action
,
739 const struct nlattr
*set
, size_t set_len
)
741 const struct nlattr
*tunnel
;
742 const struct nlattr
*tun_attr
;
743 size_t tun_left
, tunnel_len
;
745 if (nl_attr_type(set
) != OVS_KEY_ATTR_TUNNEL
) {
746 return parse_put_flow_set_masked_action(flower
, action
, set
,
750 tunnel
= nl_attr_get(set
);
751 tunnel_len
= nl_attr_get_size(set
);
753 action
->type
= TC_ACT_ENCAP
;
754 flower
->action_count
++;
755 NL_ATTR_FOR_EACH_UNSAFE(tun_attr
, tun_left
, tunnel
, tunnel_len
) {
756 switch (nl_attr_type(tun_attr
)) {
757 case OVS_TUNNEL_KEY_ATTR_ID
: {
758 action
->encap
.id
= nl_attr_get_be64(tun_attr
);
761 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC
: {
762 action
->encap
.ipv4
.ipv4_src
= nl_attr_get_be32(tun_attr
);
765 case OVS_TUNNEL_KEY_ATTR_IPV4_DST
: {
766 action
->encap
.ipv4
.ipv4_dst
= nl_attr_get_be32(tun_attr
);
769 case OVS_TUNNEL_KEY_ATTR_TOS
: {
770 action
->encap
.tos
= nl_attr_get_u8(tun_attr
);
773 case OVS_TUNNEL_KEY_ATTR_TTL
: {
774 action
->encap
.ttl
= nl_attr_get_u8(tun_attr
);
777 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC
: {
778 action
->encap
.ipv6
.ipv6_src
=
779 nl_attr_get_in6_addr(tun_attr
);
782 case OVS_TUNNEL_KEY_ATTR_IPV6_DST
: {
783 action
->encap
.ipv6
.ipv6_dst
=
784 nl_attr_get_in6_addr(tun_attr
);
787 case OVS_TUNNEL_KEY_ATTR_TP_SRC
: {
788 action
->encap
.tp_src
= nl_attr_get_be16(tun_attr
);
791 case OVS_TUNNEL_KEY_ATTR_TP_DST
: {
792 action
->encap
.tp_dst
= nl_attr_get_be16(tun_attr
);
802 test_key_and_mask(struct match
*match
)
804 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
805 const struct flow
*key
= &match
->flow
;
806 struct flow
*mask
= &match
->wc
.masks
;
808 if (mask
->pkt_mark
) {
809 VLOG_DBG_RL(&rl
, "offloading attribute pkt_mark isn't supported");
813 if (mask
->recirc_id
&& key
->recirc_id
) {
814 VLOG_DBG_RL(&rl
, "offloading attribute recirc_id isn't supported");
820 VLOG_DBG_RL(&rl
, "offloading attribute dp_hash isn't supported");
825 VLOG_DBG_RL(&rl
, "offloading attribute conj_id isn't supported");
829 if (mask
->skb_priority
) {
830 VLOG_DBG_RL(&rl
, "offloading attribute skb_priority isn't supported");
834 if (mask
->actset_output
) {
836 "offloading attribute actset_output isn't supported");
840 if (mask
->ct_state
) {
841 VLOG_DBG_RL(&rl
, "offloading attribute ct_state isn't supported");
846 VLOG_DBG_RL(&rl
, "offloading attribute ct_zone isn't supported");
851 VLOG_DBG_RL(&rl
, "offloading attribute ct_mark isn't supported");
855 if (mask
->packet_type
&& key
->packet_type
) {
856 VLOG_DBG_RL(&rl
, "offloading attribute packet_type isn't supported");
859 mask
->packet_type
= 0;
861 if (!ovs_u128_is_zero(mask
->ct_label
)) {
862 VLOG_DBG_RL(&rl
, "offloading attribute ct_label isn't supported");
866 for (int i
= 0; i
< FLOW_N_REGS
; i
++) {
869 "offloading attribute regs[%d] isn't supported", i
);
874 if (mask
->metadata
) {
875 VLOG_DBG_RL(&rl
, "offloading attribute metadata isn't supported");
880 VLOG_DBG_RL(&rl
, "offloading attribute nw_tos isn't supported");
884 for (int i
= 1; i
< FLOW_MAX_MPLS_LABELS
; i
++) {
885 if (mask
->mpls_lse
[i
]) {
886 VLOG_DBG_RL(&rl
, "offloading multiple mpls_lses isn't supported");
891 if (key
->dl_type
== htons(ETH_TYPE_IP
) &&
892 key
->nw_proto
== IPPROTO_ICMP
) {
895 "offloading attribute icmp_type isn't supported");
900 "offloading attribute icmp_code isn't supported");
903 } else if (key
->dl_type
== htons(ETH_TYPE_IP
) &&
904 key
->nw_proto
== IPPROTO_IGMP
) {
907 "offloading attribute igmp_type isn't supported");
912 "offloading attribute igmp_code isn't supported");
915 } else if (key
->dl_type
== htons(ETH_TYPE_IPV6
) &&
916 key
->nw_proto
== IPPROTO_ICMPV6
) {
919 "offloading attribute icmp_type isn't supported");
924 "offloading attribute icmp_code isn't supported");
927 } else if (key
->dl_type
== htons(OFP_DL_TYPE_NOT_ETH_TYPE
)) {
929 "offloading of non-ethernet packets isn't supported");
933 if (!is_all_zeros(mask
, sizeof *mask
)) {
934 VLOG_DBG_RL(&rl
, "offloading isn't supported, unknown attribute");
942 netdev_tc_flow_put(struct netdev
*netdev
, struct match
*match
,
943 struct nlattr
*actions
, size_t actions_len
,
944 const ovs_u128
*ufid
, struct offload_info
*info
,
945 struct dpif_flow_stats
*stats OVS_UNUSED
)
947 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
948 struct tc_flower flower
;
949 const struct flow
*key
= &match
->flow
;
950 struct flow
*mask
= &match
->wc
.masks
;
951 const struct flow_tnl
*tnl
= &match
->flow
.tunnel
;
952 const struct flow_tnl
*tnl_mask
= &mask
->tunnel
;
953 struct tc_action
*action
;
954 uint32_t block_id
= 0;
962 ifindex
= netdev_get_ifindex(netdev
);
964 VLOG_ERR_RL(&error_rl
, "flow_put: failed to get ifindex for %s: %s",
965 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
969 memset(&flower
, 0, sizeof flower
);
971 if (flow_tnl_dst_is_set(&key
->tunnel
)) {
973 "tunnel: id %#" PRIx64
" src " IP_FMT
974 " dst " IP_FMT
" tp_src %d tp_dst %d",
976 IP_ARGS(tnl
->ip_src
), IP_ARGS(tnl
->ip_dst
),
977 ntohs(tnl
->tp_src
), ntohs(tnl
->tp_dst
));
978 flower
.key
.tunnel
.id
= tnl
->tun_id
;
979 flower
.key
.tunnel
.ipv4
.ipv4_src
= tnl
->ip_src
;
980 flower
.key
.tunnel
.ipv4
.ipv4_dst
= tnl
->ip_dst
;
981 flower
.key
.tunnel
.ipv6
.ipv6_src
= tnl
->ipv6_src
;
982 flower
.key
.tunnel
.ipv6
.ipv6_dst
= tnl
->ipv6_dst
;
983 flower
.key
.tunnel
.tos
= tnl
->ip_tos
;
984 flower
.key
.tunnel
.ttl
= tnl
->ip_ttl
;
985 flower
.key
.tunnel
.tp_src
= tnl
->tp_src
;
986 flower
.key
.tunnel
.tp_dst
= tnl
->tp_dst
;
987 flower
.mask
.tunnel
.tos
= tnl_mask
->ip_tos
;
988 flower
.mask
.tunnel
.ttl
= tnl_mask
->ip_ttl
;
989 flower
.tunnel
= true;
991 memset(&mask
->tunnel
, 0, sizeof mask
->tunnel
);
993 flower
.key
.eth_type
= key
->dl_type
;
994 flower
.mask
.eth_type
= mask
->dl_type
;
995 if (mask
->mpls_lse
[0]) {
996 flower
.key
.mpls_lse
= key
->mpls_lse
[0];
997 flower
.mask
.mpls_lse
= mask
->mpls_lse
[0];
998 flower
.key
.encap_eth_type
[0] = flower
.key
.eth_type
;
1000 mask
->mpls_lse
[0] = 0;
1002 if (mask
->vlans
[0].tci
) {
1003 ovs_be16 vid_mask
= mask
->vlans
[0].tci
& htons(VLAN_VID_MASK
);
1004 ovs_be16 pcp_mask
= mask
->vlans
[0].tci
& htons(VLAN_PCP_MASK
);
1005 ovs_be16 cfi
= mask
->vlans
[0].tci
& htons(VLAN_CFI
);
1007 if (cfi
&& key
->vlans
[0].tci
& htons(VLAN_CFI
)
1008 && (!vid_mask
|| vid_mask
== htons(VLAN_VID_MASK
))
1009 && (!pcp_mask
|| pcp_mask
== htons(VLAN_PCP_MASK
))
1010 && (vid_mask
|| pcp_mask
)) {
1012 flower
.key
.vlan_id
[0] = vlan_tci_to_vid(key
->vlans
[0].tci
);
1013 flower
.mask
.vlan_id
[0] = vlan_tci_to_vid(mask
->vlans
[0].tci
);
1014 VLOG_DBG_RL(&rl
, "vlan_id[0]: %d\n", flower
.key
.vlan_id
[0]);
1017 flower
.key
.vlan_prio
[0] = vlan_tci_to_pcp(key
->vlans
[0].tci
);
1018 flower
.mask
.vlan_prio
[0] = vlan_tci_to_pcp(mask
->vlans
[0].tci
);
1019 VLOG_DBG_RL(&rl
, "vlan_prio[0]: %d\n",
1020 flower
.key
.vlan_prio
[0]);
1022 flower
.key
.encap_eth_type
[0] = flower
.key
.eth_type
;
1023 flower
.key
.eth_type
= key
->vlans
[0].tpid
;
1024 } else if (mask
->vlans
[0].tci
== htons(0xffff) &&
1025 ntohs(key
->vlans
[0].tci
) == 0) {
1026 /* exact && no vlan */
1033 if (mask
->vlans
[1].tci
) {
1034 ovs_be16 vid_mask
= mask
->vlans
[1].tci
& htons(VLAN_VID_MASK
);
1035 ovs_be16 pcp_mask
= mask
->vlans
[1].tci
& htons(VLAN_PCP_MASK
);
1036 ovs_be16 cfi
= mask
->vlans
[1].tci
& htons(VLAN_CFI
);
1038 if (cfi
&& key
->vlans
[1].tci
& htons(VLAN_CFI
)
1039 && (!vid_mask
|| vid_mask
== htons(VLAN_VID_MASK
))
1040 && (!pcp_mask
|| pcp_mask
== htons(VLAN_PCP_MASK
))
1041 && (vid_mask
|| pcp_mask
)) {
1043 flower
.key
.vlan_id
[1] = vlan_tci_to_vid(key
->vlans
[1].tci
);
1044 flower
.mask
.vlan_id
[1] = vlan_tci_to_vid(mask
->vlans
[1].tci
);
1045 VLOG_DBG_RL(&rl
, "vlan_id[1]: %d", flower
.key
.vlan_id
[1]);
1048 flower
.key
.vlan_prio
[1] = vlan_tci_to_pcp(key
->vlans
[1].tci
);
1049 flower
.mask
.vlan_prio
[1] = vlan_tci_to_pcp(mask
->vlans
[1].tci
);
1050 VLOG_DBG_RL(&rl
, "vlan_prio[1]: %d", flower
.key
.vlan_prio
[1]);
1052 flower
.key
.encap_eth_type
[1] = flower
.key
.encap_eth_type
[0];
1053 flower
.key
.encap_eth_type
[0] = key
->vlans
[1].tpid
;
1054 } else if (mask
->vlans
[1].tci
== htons(0xffff) &&
1055 ntohs(key
->vlans
[1].tci
) == 0) {
1056 /* exact && no vlan */
1062 memset(mask
->vlans
, 0, sizeof mask
->vlans
);
1064 flower
.key
.dst_mac
= key
->dl_dst
;
1065 flower
.mask
.dst_mac
= mask
->dl_dst
;
1066 flower
.key
.src_mac
= key
->dl_src
;
1067 flower
.mask
.src_mac
= mask
->dl_src
;
1068 memset(&mask
->dl_dst
, 0, sizeof mask
->dl_dst
);
1069 memset(&mask
->dl_src
, 0, sizeof mask
->dl_src
);
1071 mask
->in_port
.odp_port
= 0;
1073 if (is_ip_any(key
)) {
1074 flower
.key
.ip_proto
= key
->nw_proto
;
1075 flower
.mask
.ip_proto
= mask
->nw_proto
;
1077 flower
.key
.ip_tos
= key
->nw_tos
;
1078 flower
.mask
.ip_tos
= mask
->nw_tos
;
1080 flower
.key
.ip_ttl
= key
->nw_ttl
;
1081 flower
.mask
.ip_ttl
= mask
->nw_ttl
;
1084 if (mask
->nw_frag
& FLOW_NW_FRAG_ANY
) {
1085 flower
.mask
.flags
|= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT
;
1087 if (key
->nw_frag
& FLOW_NW_FRAG_ANY
) {
1088 flower
.key
.flags
|= TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT
;
1090 if (mask
->nw_frag
& FLOW_NW_FRAG_LATER
) {
1091 flower
.mask
.flags
|= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST
;
1093 if (!(key
->nw_frag
& FLOW_NW_FRAG_LATER
)) {
1094 flower
.key
.flags
|= TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST
;
1102 if (key
->nw_proto
== IPPROTO_TCP
) {
1103 flower
.key
.tcp_dst
= key
->tp_dst
;
1104 flower
.mask
.tcp_dst
= mask
->tp_dst
;
1105 flower
.key
.tcp_src
= key
->tp_src
;
1106 flower
.mask
.tcp_src
= mask
->tp_src
;
1107 flower
.key
.tcp_flags
= key
->tcp_flags
;
1108 flower
.mask
.tcp_flags
= mask
->tcp_flags
;
1111 mask
->tcp_flags
= 0;
1112 } else if (key
->nw_proto
== IPPROTO_UDP
) {
1113 flower
.key
.udp_dst
= key
->tp_dst
;
1114 flower
.mask
.udp_dst
= mask
->tp_dst
;
1115 flower
.key
.udp_src
= key
->tp_src
;
1116 flower
.mask
.udp_src
= mask
->tp_src
;
1119 } else if (key
->nw_proto
== IPPROTO_SCTP
) {
1120 flower
.key
.sctp_dst
= key
->tp_dst
;
1121 flower
.mask
.sctp_dst
= mask
->tp_dst
;
1122 flower
.key
.sctp_src
= key
->tp_src
;
1123 flower
.mask
.sctp_src
= mask
->tp_src
;
1128 if (key
->dl_type
== htons(ETH_P_IP
)) {
1129 flower
.key
.ipv4
.ipv4_src
= key
->nw_src
;
1130 flower
.mask
.ipv4
.ipv4_src
= mask
->nw_src
;
1131 flower
.key
.ipv4
.ipv4_dst
= key
->nw_dst
;
1132 flower
.mask
.ipv4
.ipv4_dst
= mask
->nw_dst
;
1135 } else if (key
->dl_type
== htons(ETH_P_IPV6
)) {
1136 flower
.key
.ipv6
.ipv6_src
= key
->ipv6_src
;
1137 flower
.mask
.ipv6
.ipv6_src
= mask
->ipv6_src
;
1138 flower
.key
.ipv6
.ipv6_dst
= key
->ipv6_dst
;
1139 flower
.mask
.ipv6
.ipv6_dst
= mask
->ipv6_dst
;
1140 memset(&mask
->ipv6_src
, 0, sizeof mask
->ipv6_src
);
1141 memset(&mask
->ipv6_dst
, 0, sizeof mask
->ipv6_dst
);
1145 err
= test_key_and_mask(match
);
1150 NL_ATTR_FOR_EACH(nla
, left
, actions
, actions_len
) {
1151 if (flower
.action_count
>= TCA_ACT_MAX_PRIO
) {
1152 VLOG_DBG_RL(&rl
, "Can only support %d actions", flower
.action_count
);
1155 action
= &flower
.actions
[flower
.action_count
];
1156 if (nl_attr_type(nla
) == OVS_ACTION_ATTR_OUTPUT
) {
1157 odp_port_t port
= nl_attr_get_odp_port(nla
);
1158 struct netdev
*outdev
= netdev_ports_get(port
, info
->dpif_class
);
1160 action
->ifindex_out
= netdev_get_ifindex(outdev
);
1161 action
->type
= TC_ACT_OUTPUT
;
1162 flower
.action_count
++;
1163 netdev_close(outdev
);
1164 } else if (nl_attr_type(nla
) == OVS_ACTION_ATTR_PUSH_VLAN
) {
1165 const struct ovs_action_push_vlan
*vlan_push
= nl_attr_get(nla
);
1167 action
->vlan
.vlan_push_tpid
= vlan_push
->vlan_tpid
;
1168 action
->vlan
.vlan_push_id
= vlan_tci_to_vid(vlan_push
->vlan_tci
);
1169 action
->vlan
.vlan_push_prio
= vlan_tci_to_pcp(vlan_push
->vlan_tci
);
1170 action
->type
= TC_ACT_VLAN_PUSH
;
1171 flower
.action_count
++;
1172 } else if (nl_attr_type(nla
) == OVS_ACTION_ATTR_POP_VLAN
) {
1173 action
->type
= TC_ACT_VLAN_POP
;
1174 flower
.action_count
++;
1175 } else if (nl_attr_type(nla
) == OVS_ACTION_ATTR_SET
) {
1176 const struct nlattr
*set
= nl_attr_get(nla
);
1177 const size_t set_len
= nl_attr_get_size(nla
);
1179 err
= parse_put_flow_set_action(&flower
, action
, set
, set_len
);
1183 if (action
->type
== TC_ACT_ENCAP
) {
1184 action
->encap
.tp_dst
= info
->tp_dst_port
;
1186 } else if (nl_attr_type(nla
) == OVS_ACTION_ATTR_SET_MASKED
) {
1187 const struct nlattr
*set
= nl_attr_get(nla
);
1188 const size_t set_len
= nl_attr_get_size(nla
);
1190 err
= parse_put_flow_set_masked_action(&flower
, action
, set
,
1196 VLOG_DBG_RL(&rl
, "unsupported put action type: %d",
1202 block_id
= get_block_id_from_netdev(netdev
);
1203 handle
= get_ufid_tc_mapping(ufid
, &prio
, NULL
);
1204 if (handle
&& prio
) {
1205 VLOG_DBG_RL(&rl
, "updating old handle: %d prio: %d", handle
, prio
);
1206 tc_del_filter(ifindex
, prio
, handle
, block_id
);
1210 prio
= get_prio_for_tc_flower(&flower
);
1212 VLOG_ERR_RL(&rl
, "couldn't get tc prio: %s", ovs_strerror(ENOSPC
));
1217 flower
.act_cookie
.data
= ufid
;
1218 flower
.act_cookie
.len
= sizeof *ufid
;
1220 err
= tc_replace_flower(ifindex
, prio
, handle
, &flower
, block_id
);
1222 add_ufid_tc_mapping(ufid
, flower
.prio
, flower
.handle
, netdev
, ifindex
);
1229 netdev_tc_flow_get(struct netdev
*netdev OVS_UNUSED
,
1230 struct match
*match
,
1231 struct nlattr
**actions
,
1232 const ovs_u128
*ufid
,
1233 struct dpif_flow_stats
*stats
,
1234 struct dpif_flow_attrs
*attrs
,
1237 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
1239 struct tc_flower flower
;
1240 uint32_t block_id
= 0;
1247 handle
= get_ufid_tc_mapping(ufid
, &prio
, &dev
);
1252 ifindex
= netdev_get_ifindex(dev
);
1254 VLOG_ERR_RL(&error_rl
, "flow_get: failed to get ifindex for %s: %s",
1255 netdev_get_name(dev
), ovs_strerror(-ifindex
));
1260 VLOG_DBG_RL(&rl
, "flow get (dev %s prio %d handle %d)",
1261 netdev_get_name(dev
), prio
, handle
);
1262 block_id
= get_block_id_from_netdev(netdev
);
1263 err
= tc_get_flower(ifindex
, prio
, handle
, &flower
, block_id
);
1266 VLOG_ERR_RL(&error_rl
, "flow get failed (dev %s prio %d handle %d): %s",
1267 netdev_get_name(dev
), prio
, handle
, ovs_strerror(err
));
1271 in_port
= netdev_ifindex_to_odp_port(ifindex
);
1272 parse_tc_flower_to_match(&flower
, match
, actions
, stats
, attrs
, buf
);
1274 match
->wc
.masks
.in_port
.odp_port
= u32_to_odp(UINT32_MAX
);
1275 match
->flow
.in_port
.odp_port
= in_port
;
1281 netdev_tc_flow_del(struct netdev
*netdev OVS_UNUSED
,
1282 const ovs_u128
*ufid
,
1283 struct dpif_flow_stats
*stats
)
1285 struct tc_flower flower
;
1286 uint32_t block_id
= 0;
1293 handle
= get_ufid_tc_mapping(ufid
, &prio
, &dev
);
1298 ifindex
= netdev_get_ifindex(dev
);
1300 VLOG_ERR_RL(&error_rl
, "flow_del: failed to get ifindex for %s: %s",
1301 netdev_get_name(dev
), ovs_strerror(-ifindex
));
1306 block_id
= get_block_id_from_netdev(netdev
);
1309 memset(stats
, 0, sizeof *stats
);
1310 if (!tc_get_flower(ifindex
, prio
, handle
, &flower
, block_id
)) {
1311 stats
->n_packets
= get_32aligned_u64(&flower
.stats
.n_packets
);
1312 stats
->n_bytes
= get_32aligned_u64(&flower
.stats
.n_bytes
);
1313 stats
->used
= flower
.lastused
;
1317 error
= tc_del_filter(ifindex
, prio
, handle
, block_id
);
1318 del_ufid_tc_mapping(ufid
);
1326 probe_multi_mask_per_prio(int ifindex
)
1328 struct tc_flower flower
;
1332 error
= tc_add_del_ingress_qdisc(ifindex
, true, block_id
);
1337 memset(&flower
, 0, sizeof flower
);
1339 flower
.key
.eth_type
= htons(ETH_P_IP
);
1340 flower
.mask
.eth_type
= OVS_BE16_MAX
;
1341 memset(&flower
.key
.dst_mac
, 0x11, sizeof flower
.key
.dst_mac
);
1342 memset(&flower
.mask
.dst_mac
, 0xff, sizeof flower
.mask
.dst_mac
);
1344 error
= tc_replace_flower(ifindex
, 1, 1, &flower
, block_id
);
1349 memset(&flower
.key
.src_mac
, 0x11, sizeof flower
.key
.src_mac
);
1350 memset(&flower
.mask
.src_mac
, 0xff, sizeof flower
.mask
.src_mac
);
1352 error
= tc_replace_flower(ifindex
, 1, 2, &flower
, block_id
);
1353 tc_del_filter(ifindex
, 1, 1, block_id
);
1359 tc_del_filter(ifindex
, 1, 2, block_id
);
1361 multi_mask_per_prio
= true;
1362 VLOG_INFO("probe tc: multiple masks on single tc prio is supported.");
1365 tc_add_del_ingress_qdisc(ifindex
, false, block_id
);
1369 probe_tc_block_support(int ifindex
)
1371 uint32_t block_id
= 1;
1374 error
= tc_add_del_ingress_qdisc(ifindex
, true, block_id
);
1379 tc_add_del_ingress_qdisc(ifindex
, false, block_id
);
1381 block_support
= true;
1382 VLOG_INFO("probe tc: block offload is supported.");
1386 netdev_tc_init_flow_api(struct netdev
*netdev
)
1388 static struct ovsthread_once multi_mask_once
= OVSTHREAD_ONCE_INITIALIZER
;
1389 static struct ovsthread_once block_once
= OVSTHREAD_ONCE_INITIALIZER
;
1390 uint32_t block_id
= 0;
1394 ifindex
= netdev_get_ifindex(netdev
);
1396 VLOG_ERR_RL(&error_rl
, "init: failed to get ifindex for %s: %s",
1397 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
1401 if (ovsthread_once_start(&block_once
)) {
1402 probe_tc_block_support(ifindex
);
1403 ovsthread_once_done(&block_once
);
1406 if (ovsthread_once_start(&multi_mask_once
)) {
1407 probe_multi_mask_per_prio(ifindex
);
1408 ovsthread_once_done(&multi_mask_once
);
1411 block_id
= get_block_id_from_netdev(netdev
);
1412 error
= tc_add_del_ingress_qdisc(ifindex
, true, block_id
);
1414 if (error
&& error
!= EEXIST
) {
1415 VLOG_ERR("failed adding ingress qdisc required for offloading: %s",
1416 ovs_strerror(error
));
1420 VLOG_INFO("added ingress qdisc to %s", netdev_get_name(netdev
));