/*
 * Copyright (c) 2016 Mellanox Technologies, Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "netdev-tc-offloads.h"

#include <errno.h>
#include <linux/if_ether.h>

#include "netdev-linux.h"
#include "netlink-socket.h"
#include "odp-netlink.h"
#include "openvswitch/hmap.h"
#include "openvswitch/match.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/thread.h"
#include "openvswitch/types.h"
#include "openvswitch/vlog.h"
#include "unaligned.h"
39 VLOG_DEFINE_THIS_MODULE(netdev_tc_offloads
);
41 static struct vlog_rate_limit error_rl
= VLOG_RATE_LIMIT_INIT(60, 5);
43 static struct hmap ufid_tc
= HMAP_INITIALIZER(&ufid_tc
);
44 static struct ovs_mutex ufid_lock
= OVS_MUTEX_INITIALIZER
;
47 * struct ufid_tc_data - data entry for ufid_tc hmap.
48 * @ufid_node: Element in @ufid_tc hash table by ufid key.
49 * @tc_node: Element in @ufid_tc hash table by prio/handle/ifindex key.
50 * @ufid: ufid assigned to the flow
53 * @ifindex: netdev ifindex.
54 * @netdev: netdev associated with the tc rule
57 struct hmap_node ufid_node
;
58 struct hmap_node tc_node
;
63 struct netdev
*netdev
;
66 /* Remove matching ufid entry from ufid_tc hashmap. */
68 del_ufid_tc_mapping(const ovs_u128
*ufid
)
70 size_t ufid_hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
71 struct ufid_tc_data
*data
;
73 ovs_mutex_lock(&ufid_lock
);
74 HMAP_FOR_EACH_WITH_HASH(data
, ufid_node
, ufid_hash
, &ufid_tc
) {
75 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
81 ovs_mutex_unlock(&ufid_lock
);
85 hmap_remove(&ufid_tc
, &data
->ufid_node
);
86 hmap_remove(&ufid_tc
, &data
->tc_node
);
87 netdev_close(data
->netdev
);
89 ovs_mutex_unlock(&ufid_lock
);
92 /* Add ufid entry to ufid_tc hashmap.
93 * If entry exists already it will be replaced. */
95 add_ufid_tc_mapping(const ovs_u128
*ufid
, int prio
, int handle
,
96 struct netdev
*netdev
, int ifindex
)
98 size_t ufid_hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
99 size_t tc_hash
= hash_int(hash_int(prio
, handle
), ifindex
);
100 struct ufid_tc_data
*new_data
= xzalloc(sizeof *new_data
);
102 del_ufid_tc_mapping(ufid
);
104 new_data
->ufid
= *ufid
;
105 new_data
->prio
= prio
;
106 new_data
->handle
= handle
;
107 new_data
->netdev
= netdev_ref(netdev
);
108 new_data
->ifindex
= ifindex
;
110 ovs_mutex_lock(&ufid_lock
);
111 hmap_insert(&ufid_tc
, &new_data
->ufid_node
, ufid_hash
);
112 hmap_insert(&ufid_tc
, &new_data
->tc_node
, tc_hash
);
113 ovs_mutex_unlock(&ufid_lock
);
116 /* Get ufid from ufid_tc hashmap.
118 * If netdev output param is not NULL then the function will return
119 * associated netdev on success and a refcount is taken on that netdev.
120 * The caller is then responsible to close the netdev.
122 * Returns handle if successful and fill prio and netdev for that ufid.
123 * Otherwise returns 0.
126 get_ufid_tc_mapping(const ovs_u128
*ufid
, int *prio
, struct netdev
**netdev
)
128 size_t ufid_hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
129 struct ufid_tc_data
*data
;
132 ovs_mutex_lock(&ufid_lock
);
133 HMAP_FOR_EACH_WITH_HASH(data
, ufid_node
, ufid_hash
, &ufid_tc
) {
134 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
139 *netdev
= netdev_ref(data
->netdev
);
141 handle
= data
->handle
;
145 ovs_mutex_unlock(&ufid_lock
);
150 /* Find ufid entry in ufid_tc hashmap using prio, handle and netdev.
151 * The result is saved in ufid.
153 * Returns true on success.
156 find_ufid(int prio
, int handle
, struct netdev
*netdev
, ovs_u128
*ufid
)
158 int ifindex
= netdev_get_ifindex(netdev
);
159 struct ufid_tc_data
*data
;
160 size_t tc_hash
= hash_int(hash_int(prio
, handle
), ifindex
);
162 ovs_mutex_lock(&ufid_lock
);
163 HMAP_FOR_EACH_WITH_HASH(data
, tc_node
, tc_hash
, &ufid_tc
) {
164 if (data
->prio
== prio
&& data
->handle
== handle
165 && data
->ifindex
== ifindex
) {
170 ovs_mutex_unlock(&ufid_lock
);
172 return (data
!= NULL
);
175 struct prio_map_data
{
176 struct hmap_node node
;
177 struct tc_flower_key mask
;
182 /* Get free prio for tc flower
183 * If prio is already allocated for mask/eth_type combination then return it.
184 * If not assign new prio.
186 * Return prio on success or 0 if we are out of prios.
189 get_prio_for_tc_flower(struct tc_flower
*flower
)
191 static struct hmap prios
= HMAP_INITIALIZER(&prios
);
192 static struct ovs_mutex prios_lock
= OVS_MUTEX_INITIALIZER
;
193 static uint16_t last_prio
= 0;
194 size_t key_len
= sizeof(struct tc_flower_key
);
195 size_t hash
= hash_bytes(&flower
->mask
, key_len
,
196 (OVS_FORCE
uint32_t) flower
->key
.eth_type
);
197 struct prio_map_data
*data
;
198 struct prio_map_data
*new_data
;
200 /* We can use the same prio for same mask/eth combination but must have
201 * different prio if not. Flower classifier will reject same prio for
202 * different mask/eth combination. */
203 ovs_mutex_lock(&prios_lock
);
204 HMAP_FOR_EACH_WITH_HASH(data
, node
, hash
, &prios
) {
205 if (!memcmp(&flower
->mask
, &data
->mask
, key_len
)
206 && data
->protocol
== flower
->key
.eth_type
) {
207 ovs_mutex_unlock(&prios_lock
);
212 if (last_prio
== UINT16_MAX
) {
213 /* last_prio can overflow if there will be many different kinds of
214 * flows which shouldn't happen organically. */
215 ovs_mutex_unlock(&prios_lock
);
219 new_data
= xzalloc(sizeof *new_data
);
220 memcpy(&new_data
->mask
, &flower
->mask
, key_len
);
221 new_data
->prio
= ++last_prio
;
222 new_data
->protocol
= flower
->key
.eth_type
;
223 hmap_insert(&prios
, &new_data
->node
, hash
);
224 ovs_mutex_unlock(&prios_lock
);
226 return new_data
->prio
;
230 netdev_tc_flow_flush(struct netdev
*netdev
)
232 int ifindex
= netdev_get_ifindex(netdev
);
235 VLOG_ERR_RL(&error_rl
, "failed to get ifindex for %s: %s",
236 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
240 return tc_flush(ifindex
);
244 netdev_tc_flow_dump_create(struct netdev
*netdev
,
245 struct netdev_flow_dump
**dump_out
)
247 struct netdev_flow_dump
*dump
;
250 ifindex
= netdev_get_ifindex(netdev
);
252 VLOG_ERR_RL(&error_rl
, "failed to get ifindex for %s: %s",
253 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
257 dump
= xzalloc(sizeof *dump
);
258 dump
->nl_dump
= xzalloc(sizeof *dump
->nl_dump
);
259 dump
->netdev
= netdev_ref(netdev
);
260 tc_dump_flower_start(ifindex
, dump
->nl_dump
);
268 netdev_tc_flow_dump_destroy(struct netdev_flow_dump
*dump
)
270 nl_dump_done(dump
->nl_dump
);
271 netdev_close(dump
->netdev
);
278 parse_tc_flower_to_match(struct tc_flower
*flower
,
280 struct nlattr
**actions
,
281 struct dpif_flow_stats
*stats
,
282 struct ofpbuf
*buf
) {
284 struct tc_flower_key
*key
= &flower
->key
;
285 struct tc_flower_key
*mask
= &flower
->mask
;
286 odp_port_t outport
= 0;
288 if (flower
->ifindex_out
) {
289 outport
= netdev_ifindex_to_odp_port(flower
->ifindex_out
);
297 match_init_catchall(match
);
298 match_set_dl_src_masked(match
, key
->src_mac
, mask
->src_mac
);
299 match_set_dl_dst_masked(match
, key
->dst_mac
, mask
->dst_mac
);
301 if (key
->eth_type
== htons(ETH_TYPE_VLAN
)) {
302 match_set_dl_vlan(match
, htons(key
->vlan_id
));
303 match_set_dl_vlan_pcp(match
, key
->vlan_prio
);
304 match_set_dl_type(match
, key
->encap_eth_type
);
305 flow_fix_vlan_tpid(&match
->flow
);
307 match_set_dl_type(match
, key
->eth_type
);
310 if (is_ip_any(&match
->flow
)) {
312 match_set_nw_proto(match
, key
->ip_proto
);
315 match_set_nw_ttl_masked(match
, key
->ip_ttl
, mask
->ip_ttl
);
317 match_set_nw_src_masked(match
, key
->ipv4
.ipv4_src
, mask
->ipv4
.ipv4_src
);
318 match_set_nw_dst_masked(match
, key
->ipv4
.ipv4_dst
, mask
->ipv4
.ipv4_dst
);
320 match_set_ipv6_src_masked(match
,
321 &key
->ipv6
.ipv6_src
, &mask
->ipv6
.ipv6_src
);
322 match_set_ipv6_dst_masked(match
,
323 &key
->ipv6
.ipv6_dst
, &mask
->ipv6
.ipv6_dst
);
325 if (key
->ip_proto
== IPPROTO_TCP
) {
326 match_set_tp_dst_masked(match
, key
->tcp_dst
, mask
->tcp_dst
);
327 match_set_tp_src_masked(match
, key
->tcp_src
, mask
->tcp_src
);
328 match_set_tcp_flags_masked(match
, key
->tcp_flags
, mask
->tcp_flags
);
329 } else if (key
->ip_proto
== IPPROTO_UDP
) {
330 match_set_tp_dst_masked(match
, key
->udp_dst
, mask
->udp_dst
);
331 match_set_tp_src_masked(match
, key
->udp_src
, mask
->udp_src
);
332 } else if (key
->ip_proto
== IPPROTO_SCTP
) {
333 match_set_tp_dst_masked(match
, key
->sctp_dst
, mask
->sctp_dst
);
334 match_set_tp_src_masked(match
, key
->sctp_src
, mask
->sctp_src
);
338 if (flower
->tunnel
.tunnel
) {
339 match_set_tun_id(match
, flower
->tunnel
.id
);
340 if (flower
->tunnel
.ipv4
.ipv4_dst
) {
341 match_set_tun_src(match
, flower
->tunnel
.ipv4
.ipv4_src
);
342 match_set_tun_dst(match
, flower
->tunnel
.ipv4
.ipv4_dst
);
343 } else if (!is_all_zeros(&flower
->tunnel
.ipv6
.ipv6_dst
,
344 sizeof flower
->tunnel
.ipv6
.ipv6_dst
)) {
345 match_set_tun_ipv6_src(match
, &flower
->tunnel
.ipv6
.ipv6_src
);
346 match_set_tun_ipv6_dst(match
, &flower
->tunnel
.ipv6
.ipv6_dst
);
348 if (flower
->tunnel
.tp_dst
) {
349 match_set_tun_tp_dst(match
, flower
->tunnel
.tp_dst
);
353 act_off
= nl_msg_start_nested(buf
, OVS_FLOW_ATTR_ACTIONS
);
355 if (flower
->vlan_pop
) {
356 nl_msg_put_flag(buf
, OVS_ACTION_ATTR_POP_VLAN
);
359 if (flower
->vlan_push_id
|| flower
->vlan_push_prio
) {
360 struct ovs_action_push_vlan
*push
;
361 push
= nl_msg_put_unspec_zero(buf
, OVS_ACTION_ATTR_PUSH_VLAN
,
364 push
->vlan_tpid
= htons(ETH_TYPE_VLAN
);
365 push
->vlan_tci
= htons(flower
->vlan_push_id
366 | (flower
->vlan_push_prio
<< 13)
370 if (flower
->set
.set
) {
371 size_t set_offset
= nl_msg_start_nested(buf
, OVS_ACTION_ATTR_SET
);
372 size_t tunnel_offset
=
373 nl_msg_start_nested(buf
, OVS_KEY_ATTR_TUNNEL
);
375 nl_msg_put_be64(buf
, OVS_TUNNEL_KEY_ATTR_ID
, flower
->set
.id
);
376 if (flower
->set
.ipv4
.ipv4_src
) {
377 nl_msg_put_be32(buf
, OVS_TUNNEL_KEY_ATTR_IPV4_SRC
,
378 flower
->set
.ipv4
.ipv4_src
);
380 if (flower
->set
.ipv4
.ipv4_dst
) {
381 nl_msg_put_be32(buf
, OVS_TUNNEL_KEY_ATTR_IPV4_DST
,
382 flower
->set
.ipv4
.ipv4_dst
);
384 if (!is_all_zeros(&flower
->set
.ipv6
.ipv6_src
,
385 sizeof flower
->set
.ipv6
.ipv6_src
)) {
386 nl_msg_put_in6_addr(buf
, OVS_TUNNEL_KEY_ATTR_IPV6_SRC
,
387 &flower
->set
.ipv6
.ipv6_src
);
389 if (!is_all_zeros(&flower
->set
.ipv6
.ipv6_dst
,
390 sizeof flower
->set
.ipv6
.ipv6_dst
)) {
391 nl_msg_put_in6_addr(buf
, OVS_TUNNEL_KEY_ATTR_IPV6_DST
,
392 &flower
->set
.ipv6
.ipv6_dst
);
394 nl_msg_put_be16(buf
, OVS_TUNNEL_KEY_ATTR_TP_DST
,
397 nl_msg_end_nested(buf
, tunnel_offset
);
398 nl_msg_end_nested(buf
, set_offset
);
401 if (flower
->ifindex_out
> 0) {
402 nl_msg_put_u32(buf
, OVS_ACTION_ATTR_OUTPUT
, odp_to_u32(outport
));
406 nl_msg_end_nested(buf
, act_off
);
408 *actions
= ofpbuf_at_assert(buf
, act_off
, sizeof(struct nlattr
));
411 memset(stats
, 0, sizeof *stats
);
412 stats
->n_packets
= get_32aligned_u64(&flower
->stats
.n_packets
);
413 stats
->n_bytes
= get_32aligned_u64(&flower
->stats
.n_bytes
);
414 stats
->used
= flower
->lastused
;
421 netdev_tc_flow_dump_next(struct netdev_flow_dump
*dump
,
423 struct nlattr
**actions
,
424 struct dpif_flow_stats
*stats
,
426 struct ofpbuf
*rbuffer
,
427 struct ofpbuf
*wbuffer
)
429 struct ofpbuf nl_flow
;
431 while (nl_dump_next(dump
->nl_dump
, &nl_flow
, rbuffer
)) {
432 struct tc_flower flower
;
433 struct netdev
*netdev
= dump
->netdev
;
435 if (parse_netlink_to_tc_flower(&nl_flow
, &flower
)) {
439 if (parse_tc_flower_to_match(&flower
, match
, actions
, stats
,
444 if (flower
.act_cookie
.len
) {
445 *ufid
= *((ovs_u128
*) flower
.act_cookie
.data
);
446 } else if (!find_ufid(flower
.prio
, flower
.handle
, netdev
, ufid
)) {
450 match
->wc
.masks
.in_port
.odp_port
= u32_to_odp(UINT32_MAX
);
451 match
->flow
.in_port
.odp_port
= dump
->port
;
460 parse_put_flow_set_action(struct tc_flower
*flower
, const struct nlattr
*set
,
463 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
464 const struct nlattr
*set_attr
;
467 NL_ATTR_FOR_EACH_UNSAFE(set_attr
, set_left
, set
, set_len
) {
468 if (nl_attr_type(set_attr
) == OVS_KEY_ATTR_TUNNEL
) {
469 const struct nlattr
*tunnel
= nl_attr_get(set_attr
);
470 const size_t tunnel_len
= nl_attr_get_size(set_attr
);
471 const struct nlattr
*tun_attr
;
474 flower
->set
.set
= true;
475 NL_ATTR_FOR_EACH_UNSAFE(tun_attr
, tun_left
, tunnel
, tunnel_len
) {
476 switch (nl_attr_type(tun_attr
)) {
477 case OVS_TUNNEL_KEY_ATTR_ID
: {
478 flower
->set
.id
= nl_attr_get_be64(tun_attr
);
481 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC
: {
482 flower
->set
.ipv4
.ipv4_src
= nl_attr_get_be32(tun_attr
);
485 case OVS_TUNNEL_KEY_ATTR_IPV4_DST
: {
486 flower
->set
.ipv4
.ipv4_dst
= nl_attr_get_be32(tun_attr
);
489 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC
: {
490 flower
->set
.ipv6
.ipv6_src
=
491 nl_attr_get_in6_addr(tun_attr
);
494 case OVS_TUNNEL_KEY_ATTR_IPV6_DST
: {
495 flower
->set
.ipv6
.ipv6_dst
=
496 nl_attr_get_in6_addr(tun_attr
);
499 case OVS_TUNNEL_KEY_ATTR_TP_SRC
: {
500 flower
->set
.tp_src
= nl_attr_get_be16(tun_attr
);
503 case OVS_TUNNEL_KEY_ATTR_TP_DST
: {
504 flower
->set
.tp_dst
= nl_attr_get_be16(tun_attr
);
510 VLOG_DBG_RL(&rl
, "unsupported set action type: %d",
511 nl_attr_type(set_attr
));
519 test_key_and_mask(struct match
*match
)
521 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
522 const struct flow
*key
= &match
->flow
;
523 struct flow
*mask
= &match
->wc
.masks
;
525 if (mask
->pkt_mark
) {
526 VLOG_DBG_RL(&rl
, "offloading attribute pkt_mark isn't supported");
530 if (mask
->recirc_id
&& key
->recirc_id
) {
531 VLOG_DBG_RL(&rl
, "offloading attribute recirc_id isn't supported");
537 VLOG_DBG_RL(&rl
, "offloading attribute dp_hash isn't supported");
542 VLOG_DBG_RL(&rl
, "offloading attribute conj_id isn't supported");
546 if (mask
->skb_priority
) {
547 VLOG_DBG_RL(&rl
, "offloading attribute skb_priority isn't supported");
551 if (mask
->actset_output
) {
553 "offloading attribute actset_output isn't supported");
557 if (mask
->ct_state
) {
558 VLOG_DBG_RL(&rl
, "offloading attribute ct_state isn't supported");
563 VLOG_DBG_RL(&rl
, "offloading attribute ct_zone isn't supported");
568 VLOG_DBG_RL(&rl
, "offloading attribute ct_mark isn't supported");
572 if (mask
->packet_type
&& key
->packet_type
) {
573 VLOG_DBG_RL(&rl
, "offloading attribute packet_type isn't supported");
576 mask
->packet_type
= 0;
578 if (!ovs_u128_is_zero(mask
->ct_label
)) {
579 VLOG_DBG_RL(&rl
, "offloading attribute ct_label isn't supported");
583 for (int i
= 0; i
< FLOW_N_REGS
; i
++) {
586 "offloading attribute regs[%d] isn't supported", i
);
591 if (mask
->metadata
) {
592 VLOG_DBG_RL(&rl
, "offloading attribute metadata isn't supported");
597 VLOG_DBG_RL(&rl
, "offloading attribute nw_tos isn't supported");
602 VLOG_DBG_RL(&rl
, "offloading attribute nw_frag isn't supported");
606 for (int i
= 0; i
< FLOW_MAX_MPLS_LABELS
; i
++) {
607 if (mask
->mpls_lse
[i
]) {
608 VLOG_DBG_RL(&rl
, "offloading attribute mpls_lse isn't supported");
613 if (key
->dl_type
== htons(ETH_TYPE_IP
) &&
614 key
->nw_proto
== IPPROTO_ICMP
) {
617 "offloading attribute icmp_type isn't supported");
622 "offloading attribute icmp_code isn't supported");
625 } else if (key
->dl_type
== htons(ETH_TYPE_IP
) &&
626 key
->nw_proto
== IPPROTO_IGMP
) {
629 "offloading attribute igmp_type isn't supported");
634 "offloading attribute igmp_code isn't supported");
637 } else if (key
->dl_type
== htons(ETH_TYPE_IPV6
) &&
638 key
->nw_proto
== IPPROTO_ICMPV6
) {
641 "offloading attribute icmp_type isn't supported");
646 "offloading attribute icmp_code isn't supported");
651 if (!is_all_zeros(mask
, sizeof *mask
)) {
652 VLOG_DBG_RL(&rl
, "offloading isn't supported, unknown attribute");
660 netdev_tc_flow_put(struct netdev
*netdev
, struct match
*match
,
661 struct nlattr
*actions
, size_t actions_len
,
662 const ovs_u128
*ufid
, struct offload_info
*info
,
663 struct dpif_flow_stats
*stats OVS_UNUSED
)
665 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
666 struct tc_flower flower
;
667 const struct flow
*key
= &match
->flow
;
668 struct flow
*mask
= &match
->wc
.masks
;
669 const struct flow_tnl
*tnl
= &match
->flow
.tunnel
;
677 ifindex
= netdev_get_ifindex(netdev
);
679 VLOG_ERR_RL(&error_rl
, "failed to get ifindex for %s: %s",
680 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
684 memset(&flower
, 0, sizeof flower
);
688 "tunnel: id %#" PRIx64
" src " IP_FMT
689 " dst " IP_FMT
" tp_src %d tp_dst %d",
691 IP_ARGS(tnl
->ip_src
), IP_ARGS(tnl
->ip_dst
),
692 ntohs(tnl
->tp_src
), ntohs(tnl
->tp_dst
));
693 flower
.tunnel
.id
= tnl
->tun_id
;
694 flower
.tunnel
.ipv4
.ipv4_src
= tnl
->ip_src
;
695 flower
.tunnel
.ipv4
.ipv4_dst
= tnl
->ip_dst
;
696 flower
.tunnel
.ipv6
.ipv6_src
= tnl
->ipv6_src
;
697 flower
.tunnel
.ipv6
.ipv6_dst
= tnl
->ipv6_dst
;
698 flower
.tunnel
.tp_src
= tnl
->tp_src
;
699 flower
.tunnel
.tp_dst
= tnl
->tp_dst
;
700 flower
.tunnel
.tunnel
= true;
702 memset(&mask
->tunnel
, 0, sizeof mask
->tunnel
);
705 flower
.key
.eth_type
= key
->dl_type
;
706 flower
.mask
.eth_type
= mask
->dl_type
;
708 if (mask
->vlans
[0].tci
) {
709 ovs_be16 vid_mask
= mask
->vlans
[0].tci
& htons(VLAN_VID_MASK
);
710 ovs_be16 pcp_mask
= mask
->vlans
[0].tci
& htons(VLAN_PCP_MASK
);
711 ovs_be16 cfi
= mask
->vlans
[0].tci
& htons(VLAN_CFI
);
713 if (cfi
&& key
->vlans
[0].tci
& htons(VLAN_CFI
)
714 && (!vid_mask
|| vid_mask
== htons(VLAN_VID_MASK
))
715 && (!pcp_mask
|| pcp_mask
== htons(VLAN_PCP_MASK
))
716 && (vid_mask
|| pcp_mask
)) {
718 flower
.key
.vlan_id
= vlan_tci_to_vid(key
->vlans
[0].tci
);
719 VLOG_DBG_RL(&rl
, "vlan_id: %d\n", flower
.key
.vlan_id
);
722 flower
.key
.vlan_prio
= vlan_tci_to_pcp(key
->vlans
[0].tci
);
723 VLOG_DBG_RL(&rl
, "vlan_prio: %d\n", flower
.key
.vlan_prio
);
725 flower
.key
.encap_eth_type
= flower
.key
.eth_type
;
726 flower
.key
.eth_type
= htons(ETH_TYPE_VLAN
);
727 } else if (mask
->vlans
[0].tci
== htons(0xffff) &&
728 ntohs(key
->vlans
[0].tci
) == 0) {
729 /* exact && no vlan */
734 } else if (mask
->vlans
[1].tci
) {
737 memset(mask
->vlans
, 0, sizeof mask
->vlans
);
739 flower
.key
.dst_mac
= key
->dl_dst
;
740 flower
.mask
.dst_mac
= mask
->dl_dst
;
741 flower
.key
.src_mac
= key
->dl_src
;
742 flower
.mask
.src_mac
= mask
->dl_src
;
743 memset(&mask
->dl_dst
, 0, sizeof mask
->dl_dst
);
744 memset(&mask
->dl_src
, 0, sizeof mask
->dl_src
);
746 mask
->in_port
.odp_port
= 0;
748 if (is_ip_any(key
)) {
749 flower
.key
.ip_proto
= key
->nw_proto
;
750 flower
.mask
.ip_proto
= mask
->nw_proto
;
751 flower
.key
.ip_ttl
= key
->nw_ttl
;
752 flower
.mask
.ip_ttl
= mask
->nw_ttl
;
754 if (key
->nw_proto
== IPPROTO_TCP
) {
755 flower
.key
.tcp_dst
= key
->tp_dst
;
756 flower
.mask
.tcp_dst
= mask
->tp_dst
;
757 flower
.key
.tcp_src
= key
->tp_src
;
758 flower
.mask
.tcp_src
= mask
->tp_src
;
759 flower
.key
.tcp_flags
= key
->tcp_flags
;
760 flower
.mask
.tcp_flags
= mask
->tcp_flags
;
764 } else if (key
->nw_proto
== IPPROTO_UDP
) {
765 flower
.key
.udp_dst
= key
->tp_dst
;
766 flower
.mask
.udp_dst
= mask
->tp_dst
;
767 flower
.key
.udp_src
= key
->tp_src
;
768 flower
.mask
.udp_src
= mask
->tp_src
;
771 } else if (key
->nw_proto
== IPPROTO_SCTP
) {
772 flower
.key
.sctp_dst
= key
->tp_dst
;
773 flower
.mask
.sctp_dst
= mask
->tp_dst
;
774 flower
.key
.sctp_src
= key
->tp_src
;
775 flower
.mask
.sctp_src
= mask
->tp_src
;
785 if (key
->dl_type
== htons(ETH_P_IP
)) {
786 flower
.key
.ipv4
.ipv4_src
= key
->nw_src
;
787 flower
.mask
.ipv4
.ipv4_src
= mask
->nw_src
;
788 flower
.key
.ipv4
.ipv4_dst
= key
->nw_dst
;
789 flower
.mask
.ipv4
.ipv4_dst
= mask
->nw_dst
;
792 } else if (key
->dl_type
== htons(ETH_P_IPV6
)) {
793 flower
.key
.ipv6
.ipv6_src
= key
->ipv6_src
;
794 flower
.mask
.ipv6
.ipv6_src
= mask
->ipv6_src
;
795 flower
.key
.ipv6
.ipv6_dst
= key
->ipv6_dst
;
796 flower
.mask
.ipv6
.ipv6_dst
= mask
->ipv6_dst
;
797 memset(&mask
->ipv6_src
, 0, sizeof mask
->ipv6_src
);
798 memset(&mask
->ipv6_dst
, 0, sizeof mask
->ipv6_dst
);
802 err
= test_key_and_mask(match
);
807 NL_ATTR_FOR_EACH(nla
, left
, actions
, actions_len
) {
808 if (nl_attr_type(nla
) == OVS_ACTION_ATTR_OUTPUT
) {
809 odp_port_t port
= nl_attr_get_odp_port(nla
);
810 struct netdev
*outdev
= netdev_ports_get(port
, info
->dpif_class
);
812 flower
.ifindex_out
= netdev_get_ifindex(outdev
);
813 flower
.set
.tp_dst
= info
->tp_dst_port
;
814 netdev_close(outdev
);
815 } else if (nl_attr_type(nla
) == OVS_ACTION_ATTR_PUSH_VLAN
) {
816 const struct ovs_action_push_vlan
*vlan_push
= nl_attr_get(nla
);
818 flower
.vlan_push_id
= vlan_tci_to_vid(vlan_push
->vlan_tci
);
819 flower
.vlan_push_prio
= vlan_tci_to_pcp(vlan_push
->vlan_tci
);
820 } else if (nl_attr_type(nla
) == OVS_ACTION_ATTR_POP_VLAN
) {
822 } else if (nl_attr_type(nla
) == OVS_ACTION_ATTR_SET
) {
823 const struct nlattr
*set
= nl_attr_get(nla
);
824 const size_t set_len
= nl_attr_get_size(nla
);
826 err
= parse_put_flow_set_action(&flower
, set
, set_len
);
831 VLOG_DBG_RL(&rl
, "unsupported put action type: %d",
837 handle
= get_ufid_tc_mapping(ufid
, &prio
, NULL
);
838 if (handle
&& prio
) {
839 VLOG_DBG_RL(&rl
, "updating old handle: %d prio: %d", handle
, prio
);
840 tc_del_filter(ifindex
, prio
, handle
);
844 prio
= get_prio_for_tc_flower(&flower
);
846 VLOG_ERR_RL(&rl
, "couldn't get tc prio: %s", ovs_strerror(ENOSPC
));
851 flower
.act_cookie
.data
= ufid
;
852 flower
.act_cookie
.len
= sizeof *ufid
;
854 err
= tc_replace_flower(ifindex
, prio
, handle
, &flower
);
856 add_ufid_tc_mapping(ufid
, flower
.prio
, flower
.handle
, netdev
, ifindex
);
863 netdev_tc_flow_get(struct netdev
*netdev OVS_UNUSED
,
865 struct nlattr
**actions
,
866 const ovs_u128
*ufid
,
867 struct dpif_flow_stats
*stats
,
870 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(5, 20);
872 struct tc_flower flower
;
879 handle
= get_ufid_tc_mapping(ufid
, &prio
, &dev
);
884 ifindex
= netdev_get_ifindex(dev
);
886 VLOG_ERR_RL(&error_rl
, "failed to get ifindex for %s: %s",
887 netdev_get_name(dev
), ovs_strerror(-ifindex
));
892 VLOG_DBG_RL(&rl
, "flow get (dev %s prio %d handle %d)",
893 netdev_get_name(dev
), prio
, handle
);
894 err
= tc_get_flower(ifindex
, prio
, handle
, &flower
);
897 VLOG_ERR_RL(&error_rl
, "flow get failed (dev %s prio %d handle %d): %s",
898 netdev_get_name(dev
), prio
, handle
, ovs_strerror(err
));
902 in_port
= netdev_ifindex_to_odp_port(ifindex
);
903 parse_tc_flower_to_match(&flower
, match
, actions
, stats
, buf
);
905 match
->wc
.masks
.in_port
.odp_port
= u32_to_odp(UINT32_MAX
);
906 match
->flow
.in_port
.odp_port
= in_port
;
912 netdev_tc_flow_del(struct netdev
*netdev OVS_UNUSED
,
913 const ovs_u128
*ufid
,
914 struct dpif_flow_stats
*stats
)
922 handle
= get_ufid_tc_mapping(ufid
, &prio
, &dev
);
927 ifindex
= netdev_get_ifindex(dev
);
929 VLOG_ERR_RL(&error_rl
, "failed to get ifindex for %s: %s",
930 netdev_get_name(dev
), ovs_strerror(-ifindex
));
935 error
= tc_del_filter(ifindex
, prio
, handle
);
936 del_ufid_tc_mapping(ufid
);
941 memset(stats
, 0, sizeof *stats
);
947 netdev_tc_init_flow_api(struct netdev
*netdev
)
952 ifindex
= netdev_get_ifindex(netdev
);
954 VLOG_ERR_RL(&error_rl
, "failed to get ifindex for %s: %s",
955 netdev_get_name(netdev
), ovs_strerror(-ifindex
));
959 error
= tc_add_del_ingress_qdisc(ifindex
, true);
961 if (error
&& error
!= EEXIST
) {
962 VLOG_ERR("failed adding ingress qdisc required for offloading: %s",
963 ovs_strerror(error
));
967 VLOG_INFO("added ingress qdisc to %s", netdev_get_name(netdev
));