/*
 * Copyright (c) 2016 Mellanox Technologies, Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "netdev-tc-offloads.h"

#include <linux/if_ether.h>

#include "openvswitch/hmap.h"
#include "openvswitch/match.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/thread.h"
#include "openvswitch/types.h"
#include "openvswitch/vlog.h"
#include "netdev-provider.h"
#include "netlink-socket.h"
#include "odp-netlink.h"
#include "unaligned.h"
#include "netdev-linux.h"
VLOG_DEFINE_THIS_MODULE(netdev_tc_offloads);

static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);

static struct hmap ufid_tc = HMAP_INITIALIZER(&ufid_tc);
static struct ovs_mutex ufid_lock = OVS_MUTEX_INITIALIZER;
/*
 * struct ufid_tc_data - data entry for the ufid_tc hmap.
 * @ufid_node: Element in @ufid_tc hash table by ufid key.
 * @tc_node: Element in @ufid_tc hash table by prio/handle/ifindex key.
 * @ufid: ufid assigned to the flow.
 * @prio: tc priority of the rule.
 * @handle: tc handle of the rule.
 * @ifindex: netdev ifindex.
 * @netdev: netdev associated with the tc rule.
 */
struct ufid_tc_data {
    struct hmap_node ufid_node;
    struct hmap_node tc_node;
    ovs_u128 ufid;
    int prio;
    int handle;
    int ifindex;
    struct netdev *netdev;
};
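
/* Note: each ufid_tc_data is linked into the ufid_tc map twice: by ufid
 * (ufid_node), which serves netdev_tc_flow_get()/netdev_tc_flow_del(), and
 * by the prio/handle/ifindex triple (tc_node), which lets a rule found while
 * dumping tc be mapped back to its ufid when no act_cookie is available. */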
/* Remove the matching ufid entry from the ufid_tc hashmap. */
static void
del_ufid_tc_mapping(const ovs_u128 *ufid)
{
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    struct ufid_tc_data *data;

    ovs_mutex_lock(&ufid_lock);
    HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
        if (ovs_u128_equals(*ufid, data->ufid)) {
            break;
        }
    }

    if (!data) {
        ovs_mutex_unlock(&ufid_lock);
        return;
    }

    hmap_remove(&ufid_tc, &data->ufid_node);
    hmap_remove(&ufid_tc, &data->tc_node);
    netdev_close(data->netdev);
    free(data);

    ovs_mutex_unlock(&ufid_lock);
}
/* Add a ufid entry to the ufid_tc hashmap.
 * If an entry already exists, it is replaced. */
static void
add_ufid_tc_mapping(const ovs_u128 *ufid, int prio, int handle,
                    struct netdev *netdev, int ifindex)
{
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);
    struct ufid_tc_data *new_data = xzalloc(sizeof *new_data);

    del_ufid_tc_mapping(ufid);

    new_data->ufid = *ufid;
    new_data->prio = prio;
    new_data->handle = handle;
    new_data->netdev = netdev_ref(netdev);
    new_data->ifindex = ifindex;

    ovs_mutex_lock(&ufid_lock);
    hmap_insert(&ufid_tc, &new_data->ufid_node, ufid_hash);
    hmap_insert(&ufid_tc, &new_data->tc_node, tc_hash);
    ovs_mutex_unlock(&ufid_lock);
}
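
/* Editorial note (not in the original sources): the two hashes above tie the
 * pair of indexes together.  ufid_hash covers the raw 128-bit ufid, while
 * tc_hash folds prio, handle and ifindex together with nested hash_int()
 * calls; find_ufid() below computes the same tc_hash so both sides land in
 * the same bucket. */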
/* Get ufid from the ufid_tc hashmap.
 *
 * If the netdev output parameter is not NULL, then on success the associated
 * netdev is returned with a reference taken on it; the caller is then
 * responsible for closing that netdev.
 *
 * Returns the handle on success, filling prio and netdev for that ufid.
 * Otherwise returns 0.
 */
static int
get_ufid_tc_mapping(const ovs_u128 *ufid, int *prio, struct netdev **netdev)
{
    size_t ufid_hash = hash_bytes(ufid, sizeof *ufid, 0);
    struct ufid_tc_data *data;
    int handle = 0;

    ovs_mutex_lock(&ufid_lock);
    HMAP_FOR_EACH_WITH_HASH(data, ufid_node, ufid_hash, &ufid_tc) {
        if (ovs_u128_equals(*ufid, data->ufid)) {
            if (prio) {
                *prio = data->prio;
            }
            if (netdev) {
                *netdev = netdev_ref(data->netdev);
            }
            handle = data->handle;
            break;
        }
    }
    ovs_mutex_unlock(&ufid_lock);

    return handle;
}
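
/* Typical usage elsewhere in this file (illustrative sketch only):
 *
 *     struct netdev *dev;
 *     int prio = 0;
 *     int handle = get_ufid_tc_mapping(ufid, &prio, &dev);
 *
 *     if (handle) {
 *         ... use dev, prio and handle ...
 *         netdev_close(dev);   /+ drop the reference taken above +/
 *     }
 */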
/* Find a ufid entry in the ufid_tc hashmap using prio, handle and netdev.
 * The result is saved in 'ufid'.
 *
 * Returns true on success.
 */
static bool
find_ufid(int prio, int handle, struct netdev *netdev, ovs_u128 *ufid)
{
    int ifindex = netdev_get_ifindex(netdev);
    struct ufid_tc_data *data;
    size_t tc_hash = hash_int(hash_int(prio, handle), ifindex);

    ovs_mutex_lock(&ufid_lock);
    HMAP_FOR_EACH_WITH_HASH(data, tc_node, tc_hash, &ufid_tc) {
        if (data->prio == prio && data->handle == handle
            && data->ifindex == ifindex) {
            *ufid = data->ufid;
            break;
        }
    }
    ovs_mutex_unlock(&ufid_lock);

    return (data != NULL);
}
struct prio_map_data {
    struct hmap_node node;
    struct tc_flower_key mask;
    ovs_be16 protocol;
    uint16_t prio;
};
/* Get a free prio for a tc flower.
 * If a prio is already allocated for this mask/eth_type combination, return
 * it.  If not, assign a new one.
 *
 * Returns the prio on success or 0 if we are out of prios.
 */
static uint16_t
get_prio_for_tc_flower(struct tc_flower *flower)
{
    static struct hmap prios = HMAP_INITIALIZER(&prios);
    static struct ovs_mutex prios_lock = OVS_MUTEX_INITIALIZER;
    static uint16_t last_prio = 0;
    size_t key_len = sizeof(struct tc_flower_key);
    size_t hash = hash_bytes(&flower->mask, key_len,
                             (OVS_FORCE uint32_t) flower->key.eth_type);
    struct prio_map_data *data;
    struct prio_map_data *new_data;

    /* We can use the same prio for the same mask/eth_type combination, but
     * need a different prio otherwise: the flower classifier rejects reusing
     * a prio with a different mask/eth_type combination. */
    ovs_mutex_lock(&prios_lock);
    HMAP_FOR_EACH_WITH_HASH(data, node, hash, &prios) {
        if (!memcmp(&flower->mask, &data->mask, key_len)
            && data->protocol == flower->key.eth_type) {
            ovs_mutex_unlock(&prios_lock);
            return data->prio;
        }
    }

    if (last_prio == UINT16_MAX) {
        /* last_prio can only overflow if there are many different kinds of
         * flows, which shouldn't happen organically. */
        ovs_mutex_unlock(&prios_lock);
        return 0;
    }

    new_data = xzalloc(sizeof *new_data);
    memcpy(&new_data->mask, &flower->mask, key_len);
    new_data->prio = ++last_prio;
    new_data->protocol = flower->key.eth_type;
    hmap_insert(&prios, &new_data->node, hash);
    ovs_mutex_unlock(&prios_lock);

    return new_data->prio;
}
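
/* Example of the intended behaviour (illustrative, not from the sources):
 * two flowers with identical masks and the same eth_type are handed the same
 * prio, while a flower with a different mask or eth_type gets ++last_prio,
 * because the kernel flower classifier rejects reusing a prio with a
 * different mask/eth_type. */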
int
netdev_tc_flow_flush(struct netdev *netdev)
{
    int ifindex = netdev_get_ifindex(netdev);

    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    return tc_flush(ifindex);
}
int
netdev_tc_flow_dump_create(struct netdev *netdev,
                           struct netdev_flow_dump **dump_out)
{
    struct netdev_flow_dump *dump;
    int ifindex;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    dump = xzalloc(sizeof *dump);
    dump->nl_dump = xzalloc(sizeof *dump->nl_dump);
    dump->netdev = netdev_ref(netdev);
    tc_dump_flower_start(ifindex, dump->nl_dump);

    *dump_out = dump;

    return 0;
}
int
netdev_tc_flow_dump_destroy(struct netdev_flow_dump *dump)
{
    nl_dump_done(dump->nl_dump);
    netdev_close(dump->netdev);
    free(dump->nl_dump);
    free(dump);
    return 0;
}
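
/* Dump flow: tc_dump_flower_start() above begins a netlink dump of the
 * flower filters on the port's ingress qdisc; netdev_tc_flow_dump_next()
 * below then decodes one filter per call until nl_dump_next() is exhausted,
 * and netdev_tc_flow_dump_destroy() releases the dump state. */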
static int
parse_tc_flower_to_match(struct tc_flower *flower,
                         struct match *match,
                         struct nlattr **actions,
                         struct dpif_flow_stats *stats,
                         struct ofpbuf *buf) {
    size_t act_off;
    struct tc_flower_key *key = &flower->key;
    struct tc_flower_key *mask = &flower->mask;
    odp_port_t outport = 0;

    if (flower->ifindex_out) {
        outport = netdev_ifindex_to_odp_port(flower->ifindex_out);
        if (!outport) {
            return ENOENT;
        }
    }

    ofpbuf_clear(buf);

    match_init_catchall(match);
    match_set_dl_src_masked(match, key->src_mac, mask->src_mac);
    match_set_dl_dst_masked(match, key->dst_mac, mask->dst_mac);

    if (key->eth_type == htons(ETH_TYPE_VLAN)) {
        match_set_dl_vlan(match, htons(key->vlan_id));
        match_set_dl_vlan_pcp(match, key->vlan_prio);
        match_set_dl_type(match, key->encap_eth_type);
        flow_fix_vlan_tpid(&match->flow);
    } else {
        match_set_dl_type(match, key->eth_type);
    }

    if (key->ip_proto && is_ip_any(&match->flow)) {
        match_set_nw_proto(match, key->ip_proto);
    }

    match_set_nw_src_masked(match, key->ipv4.ipv4_src, mask->ipv4.ipv4_src);
    match_set_nw_dst_masked(match, key->ipv4.ipv4_dst, mask->ipv4.ipv4_dst);

    match_set_ipv6_src_masked(match,
                              &key->ipv6.ipv6_src, &mask->ipv6.ipv6_src);
    match_set_ipv6_dst_masked(match,
                              &key->ipv6.ipv6_dst, &mask->ipv6.ipv6_dst);

    match_set_tp_dst_masked(match, key->dst_port, mask->dst_port);
    match_set_tp_src_masked(match, key->src_port, mask->src_port);

    if (flower->tunnel.tunnel) {
        match_set_tun_id(match, flower->tunnel.id);
        if (flower->tunnel.ipv4.ipv4_dst) {
            match_set_tun_src(match, flower->tunnel.ipv4.ipv4_src);
            match_set_tun_dst(match, flower->tunnel.ipv4.ipv4_dst);
        } else if (!is_all_zeros(&flower->tunnel.ipv6.ipv6_dst,
                                 sizeof flower->tunnel.ipv6.ipv6_dst)) {
            match_set_tun_ipv6_src(match, &flower->tunnel.ipv6.ipv6_src);
            match_set_tun_ipv6_dst(match, &flower->tunnel.ipv6.ipv6_dst);
        }
        if (flower->tunnel.tp_dst) {
            match_set_tun_tp_dst(match, flower->tunnel.tp_dst);
        }
    }

    act_off = nl_msg_start_nested(buf, OVS_FLOW_ATTR_ACTIONS);

    if (flower->vlan_pop) {
        nl_msg_put_flag(buf, OVS_ACTION_ATTR_POP_VLAN);
    }

    if (flower->vlan_push_id || flower->vlan_push_prio) {
        struct ovs_action_push_vlan *push;
        push = nl_msg_put_unspec_zero(buf, OVS_ACTION_ATTR_PUSH_VLAN,
                                      sizeof *push);

        push->vlan_tpid = htons(ETH_TYPE_VLAN);
        push->vlan_tci = htons(flower->vlan_push_id
                               | (flower->vlan_push_prio << 13)
                               | VLAN_CFI);
    }

    if (flower->set.set) {
        size_t set_offset = nl_msg_start_nested(buf, OVS_ACTION_ATTR_SET);
        size_t tunnel_offset =
            nl_msg_start_nested(buf, OVS_KEY_ATTR_TUNNEL);

        nl_msg_put_be64(buf, OVS_TUNNEL_KEY_ATTR_ID, flower->set.id);
        if (flower->set.ipv4.ipv4_src) {
            nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
                            flower->set.ipv4.ipv4_src);
        }
        if (flower->set.ipv4.ipv4_dst) {
            nl_msg_put_be32(buf, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
                            flower->set.ipv4.ipv4_dst);
        }
        if (!is_all_zeros(&flower->set.ipv6.ipv6_src,
                          sizeof flower->set.ipv6.ipv6_src)) {
            nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
                                &flower->set.ipv6.ipv6_src);
        }
        if (!is_all_zeros(&flower->set.ipv6.ipv6_dst,
                          sizeof flower->set.ipv6.ipv6_dst)) {
            nl_msg_put_in6_addr(buf, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
                                &flower->set.ipv6.ipv6_dst);
        }
        nl_msg_put_be16(buf, OVS_TUNNEL_KEY_ATTR_TP_DST,
                        flower->set.tp_dst);

        nl_msg_end_nested(buf, tunnel_offset);
        nl_msg_end_nested(buf, set_offset);
    }

    if (flower->ifindex_out > 0) {
        nl_msg_put_u32(buf, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(outport));
    }

    nl_msg_end_nested(buf, act_off);

    *actions = ofpbuf_at_assert(buf, act_off, sizeof(struct nlattr));

    if (stats) {
        memset(stats, 0, sizeof *stats);
        stats->n_packets = get_32aligned_u64(&flower->stats.n_packets);
        stats->n_bytes = get_32aligned_u64(&flower->stats.n_bytes);
        stats->used = flower->lastused;
    }

    return 0;
}
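
/* parse_tc_flower_to_match() is the inverse of the translation done in
 * netdev_tc_flow_put(): the flower key/mask is turned back into an OVS
 * match, and the tc actions (vlan push/pop, tunnel set, output) are
 * re-serialized as a nested OVS_FLOW_ATTR_ACTIONS attribute in 'buf', which
 * is what the dump and get paths report back to the dpif layer. */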
bool
netdev_tc_flow_dump_next(struct netdev_flow_dump *dump,
                         struct match *match,
                         struct nlattr **actions,
                         struct dpif_flow_stats *stats,
                         ovs_u128 *ufid,
                         struct ofpbuf *rbuffer,
                         struct ofpbuf *wbuffer)
{
    struct ofpbuf nl_flow;

    while (nl_dump_next(dump->nl_dump, &nl_flow, rbuffer)) {
        struct tc_flower flower;
        struct netdev *netdev = dump->netdev;

        if (parse_netlink_to_tc_flower(&nl_flow, &flower)) {
            continue;
        }

        if (parse_tc_flower_to_match(&flower, match, actions, stats,
                                     wbuffer)) {
            continue;
        }

        if (flower.act_cookie.len) {
            *ufid = *((ovs_u128 *) flower.act_cookie.data);
        } else if (!find_ufid(flower.prio, flower.handle, netdev, ufid)) {
            continue;
        }

        match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
        match->flow.in_port.odp_port = dump->port;

        return true;
    }

    return false;
}
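
/* While dumping, the ufid is preferably recovered from the act_cookie that
 * netdev_tc_flow_put() stores with each filter; only when the cookie is
 * missing do we fall back to the prio/handle/ifindex index kept in ufid_tc
 * via find_ufid(). */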
static int
parse_put_flow_set_action(struct tc_flower *flower, const struct nlattr *set,
                          size_t set_len)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct nlattr *set_attr;
    size_t set_left;

    NL_ATTR_FOR_EACH_UNSAFE(set_attr, set_left, set, set_len) {
        if (nl_attr_type(set_attr) == OVS_KEY_ATTR_TUNNEL) {
            const struct nlattr *tunnel = nl_attr_get(set_attr);
            const size_t tunnel_len = nl_attr_get_size(set_attr);
            const struct nlattr *tun_attr;
            size_t tun_left;

            flower->set.set = true;
            NL_ATTR_FOR_EACH_UNSAFE(tun_attr, tun_left, tunnel, tunnel_len) {
                switch (nl_attr_type(tun_attr)) {
                case OVS_TUNNEL_KEY_ATTR_ID: {
                    flower->set.id = nl_attr_get_be64(tun_attr);
                }
                break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: {
                    flower->set.ipv4.ipv4_src = nl_attr_get_be32(tun_attr);
                }
                break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_DST: {
                    flower->set.ipv4.ipv4_dst = nl_attr_get_be32(tun_attr);
                }
                break;
                case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: {
                    flower->set.ipv6.ipv6_src =
                        nl_attr_get_in6_addr(tun_attr);
                }
                break;
                case OVS_TUNNEL_KEY_ATTR_IPV6_DST: {
                    flower->set.ipv6.ipv6_dst =
                        nl_attr_get_in6_addr(tun_attr);
                }
                break;
                case OVS_TUNNEL_KEY_ATTR_TP_SRC: {
                    flower->set.tp_src = nl_attr_get_be16(tun_attr);
                }
                break;
                case OVS_TUNNEL_KEY_ATTR_TP_DST: {
                    flower->set.tp_dst = nl_attr_get_be16(tun_attr);
                }
                break;
                }
            }
        } else {
            VLOG_DBG_RL(&rl, "unsupported set action type: %d",
                        nl_attr_type(set_attr));
            return EOPNOTSUPP;
        }
    }

    return 0;
}
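
/* The OVS_ACTION_ATTR_SET payload handled above nests a tunnel key, roughly
 * (illustrative layout; attribute order and presence vary per flow):
 *
 *     OVS_ACTION_ATTR_SET
 *       OVS_KEY_ATTR_TUNNEL
 *         OVS_TUNNEL_KEY_ATTR_ID                  (be64)
 *         OVS_TUNNEL_KEY_ATTR_IPV4_SRC/IPV4_DST   (be32)
 *         OVS_TUNNEL_KEY_ATTR_IPV6_SRC/IPV6_DST   (in6_addr)
 *         OVS_TUNNEL_KEY_ATTR_TP_SRC/TP_DST       (be16)
 *
 * Each recognized attribute is copied into flower->set, which
 * tc_replace_flower() is then expected to encode as the tunnel set action. */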
static int
test_key_and_mask(struct match *match)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    const struct flow *key = &match->flow;
    struct flow *mask = &match->wc.masks;

    if (mask->pkt_mark) {
        VLOG_DBG_RL(&rl, "offloading attribute pkt_mark isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->recirc_id && key->recirc_id) {
        VLOG_DBG_RL(&rl, "offloading attribute recirc_id isn't supported");
        return EOPNOTSUPP;
    }
    mask->recirc_id = 0;

    if (mask->dp_hash) {
        VLOG_DBG_RL(&rl, "offloading attribute dp_hash isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->conj_id) {
        VLOG_DBG_RL(&rl, "offloading attribute conj_id isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->skb_priority) {
        VLOG_DBG_RL(&rl, "offloading attribute skb_priority isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->actset_output) {
        VLOG_DBG_RL(&rl,
                    "offloading attribute actset_output isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->ct_state) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_state isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->ct_zone) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_zone isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->ct_mark) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_mark isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->packet_type && key->packet_type) {
        VLOG_DBG_RL(&rl, "offloading attribute packet_type isn't supported");
        return EOPNOTSUPP;
    }
    mask->packet_type = 0;

    if (!ovs_u128_is_zero(mask->ct_label)) {
        VLOG_DBG_RL(&rl, "offloading attribute ct_label isn't supported");
        return EOPNOTSUPP;
    }

    for (int i = 0; i < FLOW_N_REGS; i++) {
        if (mask->regs[i]) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute regs[%d] isn't supported", i);
            return EOPNOTSUPP;
        }
    }

    if (mask->metadata) {
        VLOG_DBG_RL(&rl, "offloading attribute metadata isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->nw_tos) {
        VLOG_DBG_RL(&rl, "offloading attribute nw_tos isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->nw_ttl) {
        VLOG_DBG_RL(&rl, "offloading attribute nw_ttl isn't supported");
        return EOPNOTSUPP;
    }

    if (mask->nw_frag) {
        VLOG_DBG_RL(&rl, "offloading attribute nw_frag isn't supported");
        return EOPNOTSUPP;
    }

    for (int i = 0; i < FLOW_MAX_MPLS_LABELS; i++) {
        if (mask->mpls_lse[i]) {
            VLOG_DBG_RL(&rl, "offloading attribute mpls_lse isn't supported");
            return EOPNOTSUPP;
        }
    }

    if (key->dl_type == htons(ETH_TYPE_IP) &&
        key->nw_proto == IPPROTO_ICMP) {
        if (mask->tp_src) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_type isn't supported");
            return EOPNOTSUPP;
        }
        if (mask->tp_dst) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_code isn't supported");
            return EOPNOTSUPP;
        }
    } else if (key->dl_type == htons(ETH_TYPE_IP) &&
               key->nw_proto == IPPROTO_IGMP) {
        if (mask->tp_src) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute igmp_type isn't supported");
            return EOPNOTSUPP;
        }
        if (mask->tp_dst) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute igmp_code isn't supported");
            return EOPNOTSUPP;
        }
    } else if (key->dl_type == htons(ETH_TYPE_IPV6) &&
               key->nw_proto == IPPROTO_ICMPV6) {
        if (mask->tp_src) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_type isn't supported");
            return EOPNOTSUPP;
        }
        if (mask->tp_dst) {
            VLOG_DBG_RL(&rl,
                        "offloading attribute icmp_code isn't supported");
            return EOPNOTSUPP;
        }
    }

    if (is_ip_any(key) && key->nw_proto == IPPROTO_TCP && mask->tcp_flags) {
        VLOG_DBG_RL(&rl,
                    "offloading attribute tcp_flags isn't supported");
        return EOPNOTSUPP;
    }

    if (!is_all_zeros(mask, sizeof *mask)) {
        VLOG_DBG_RL(&rl, "offloading isn't supported, unknown attribute");
        return EOPNOTSUPP;
    }

    return 0;
}
int
netdev_tc_flow_put(struct netdev *netdev, struct match *match,
                   struct nlattr *actions, size_t actions_len,
                   const ovs_u128 *ufid, struct offload_info *info,
                   struct dpif_flow_stats *stats OVS_UNUSED)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    struct tc_flower flower;
    const struct flow *key = &match->flow;
    struct flow *mask = &match->wc.masks;
    const struct flow_tnl *tnl = &match->flow.tunnel;
    struct nlattr *nla;
    size_t left;
    int prio = 0;
    int handle;
    int ifindex;
    int err;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    memset(&flower, 0, sizeof flower);

    if (flow_tnl_dst_is_set(&key->tunnel)) {
        VLOG_DBG_RL(&rl,
                    "tunnel: id %#" PRIx64 " src " IP_FMT
                    " dst " IP_FMT " tp_src %d tp_dst %d",
                    ntohll(tnl->tun_id),
                    IP_ARGS(tnl->ip_src), IP_ARGS(tnl->ip_dst),
                    ntohs(tnl->tp_src), ntohs(tnl->tp_dst));
        flower.tunnel.id = tnl->tun_id;
        flower.tunnel.ipv4.ipv4_src = tnl->ip_src;
        flower.tunnel.ipv4.ipv4_dst = tnl->ip_dst;
        flower.tunnel.ipv6.ipv6_src = tnl->ipv6_src;
        flower.tunnel.ipv6.ipv6_dst = tnl->ipv6_dst;
        flower.tunnel.tp_src = tnl->tp_src;
        flower.tunnel.tp_dst = tnl->tp_dst;
        flower.tunnel.tunnel = true;
    }
    memset(&mask->tunnel, 0, sizeof mask->tunnel);

    flower.key.eth_type = key->dl_type;
    flower.mask.eth_type = mask->dl_type;

    if (mask->vlans[0].tci) {
        ovs_be16 vid_mask = mask->vlans[0].tci & htons(VLAN_VID_MASK);
        ovs_be16 pcp_mask = mask->vlans[0].tci & htons(VLAN_PCP_MASK);
        ovs_be16 cfi = mask->vlans[0].tci & htons(VLAN_CFI);

        if (cfi && key->vlans[0].tci & htons(VLAN_CFI)
            && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
            && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
            && (vid_mask || pcp_mask)) {
            if (vid_mask) {
                flower.key.vlan_id = vlan_tci_to_vid(key->vlans[0].tci);
                VLOG_DBG_RL(&rl, "vlan_id: %d\n", flower.key.vlan_id);
            }
            if (pcp_mask) {
                flower.key.vlan_prio = vlan_tci_to_pcp(key->vlans[0].tci);
                VLOG_DBG_RL(&rl, "vlan_prio: %d\n", flower.key.vlan_prio);
            }
            flower.key.encap_eth_type = flower.key.eth_type;
            flower.key.eth_type = htons(ETH_TYPE_VLAN);
        } else if (mask->vlans[0].tci == htons(0xffff) &&
                   ntohs(key->vlans[0].tci) == 0) {
            /* exact match && no vlan */
        } else {
            return EOPNOTSUPP;
        }
    } else if (mask->vlans[1].tci) {
        return EOPNOTSUPP;
    }
    memset(mask->vlans, 0, sizeof mask->vlans);

    flower.key.dst_mac = key->dl_dst;
    flower.mask.dst_mac = mask->dl_dst;
    flower.key.src_mac = key->dl_src;
    flower.mask.src_mac = mask->dl_src;
    memset(&mask->dl_dst, 0, sizeof mask->dl_dst);
    memset(&mask->dl_src, 0, sizeof mask->dl_src);
    mask->in_port.odp_port = 0;

    if (is_ip_any(key)) {
        flower.key.ip_proto = key->nw_proto;
        flower.mask.ip_proto = mask->nw_proto;

        if (key->nw_proto == IPPROTO_TCP || key->nw_proto == IPPROTO_UDP) {
            flower.key.dst_port = key->tp_dst;
            flower.mask.dst_port = mask->tp_dst;
            flower.key.src_port = key->tp_src;
            flower.mask.src_port = mask->tp_src;
            memset(&mask->tp_dst, 0, sizeof mask->tp_dst);
            memset(&mask->tp_src, 0, sizeof mask->tp_src);
        }

        if (key->dl_type == htons(ETH_P_IP)) {
            flower.key.ipv4.ipv4_src = key->nw_src;
            flower.mask.ipv4.ipv4_src = mask->nw_src;
            flower.key.ipv4.ipv4_dst = key->nw_dst;
            flower.mask.ipv4.ipv4_dst = mask->nw_dst;
            memset(&mask->nw_src, 0, sizeof mask->nw_src);
            memset(&mask->nw_dst, 0, sizeof mask->nw_dst);
        } else if (key->dl_type == htons(ETH_P_IPV6)) {
            flower.key.ipv6.ipv6_src = key->ipv6_src;
            flower.mask.ipv6.ipv6_src = mask->ipv6_src;
            flower.key.ipv6.ipv6_dst = key->ipv6_dst;
            flower.mask.ipv6.ipv6_dst = mask->ipv6_dst;
            memset(&mask->ipv6_src, 0, sizeof mask->ipv6_src);
            memset(&mask->ipv6_dst, 0, sizeof mask->ipv6_dst);
        }
    }

    err = test_key_and_mask(match);
    if (err) {
        return err;
    }

    NL_ATTR_FOR_EACH(nla, left, actions, actions_len) {
        if (nl_attr_type(nla) == OVS_ACTION_ATTR_OUTPUT) {
            odp_port_t port = nl_attr_get_odp_port(nla);
            struct netdev *outdev = netdev_ports_get(port,
                                                     info->port_hmap_obj);

            flower.ifindex_out = netdev_get_ifindex(outdev);
            flower.set.tp_dst = info->tp_dst_port;
            netdev_close(outdev);
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_PUSH_VLAN) {
            const struct ovs_action_push_vlan *vlan_push = nl_attr_get(nla);

            flower.vlan_push_id = vlan_tci_to_vid(vlan_push->vlan_tci);
            flower.vlan_push_prio = vlan_tci_to_pcp(vlan_push->vlan_tci);
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_POP_VLAN) {
            flower.vlan_pop = true;
        } else if (nl_attr_type(nla) == OVS_ACTION_ATTR_SET) {
            const struct nlattr *set = nl_attr_get(nla);
            const size_t set_len = nl_attr_get_size(nla);

            err = parse_put_flow_set_action(&flower, set, set_len);
            if (err) {
                return err;
            }
        } else {
            VLOG_DBG_RL(&rl, "unsupported put action type: %d",
                        nl_attr_type(nla));
            return EOPNOTSUPP;
        }
    }

    handle = get_ufid_tc_mapping(ufid, &prio, NULL);
    if (handle && prio) {
        VLOG_DBG_RL(&rl, "updating old handle: %d prio: %d", handle, prio);
        tc_del_filter(ifindex, prio, handle);
    }

    if (!prio) {
        prio = get_prio_for_tc_flower(&flower);
        if (!prio) {
            VLOG_ERR_RL(&rl, "couldn't get tc prio: %s",
                        ovs_strerror(ENOSPC));
            return ENOSPC;
        }
    }

    flower.act_cookie.data = ufid;
    flower.act_cookie.len = sizeof *ufid;

    err = tc_replace_flower(ifindex, prio, handle, &flower);
    if (!err) {
        add_ufid_tc_mapping(ufid, flower.prio, flower.handle, netdev, ifindex);
    }

    return err;
}
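
/* Put path summary: resolve the ifindex, translate the match into a flower
 * key/mask (clearing the consumed mask bits), reject leftovers via
 * test_key_and_mask(), translate the odp actions (output, vlan push/pop,
 * tunnel set), reuse the prio of an existing rule for this ufid (deleting
 * the old filter) or pick one with get_prio_for_tc_flower(), install the
 * rule with tc_replace_flower() with the ufid saved as act_cookie, and
 * record the ufid<->tc mapping on success. */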
int
netdev_tc_flow_get(struct netdev *netdev OVS_UNUSED,
                   struct match *match,
                   struct nlattr **actions,
                   const ovs_u128 *ufid,
                   struct dpif_flow_stats *stats,
                   struct ofpbuf *buf)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
    struct netdev *dev;
    struct tc_flower flower;
    odp_port_t in_port;
    int prio = 0;
    int ifindex;
    int handle;
    int err;

    handle = get_ufid_tc_mapping(ufid, &prio, &dev);
    if (!handle) {
        return ENOENT;
    }

    ifindex = netdev_get_ifindex(dev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(dev), ovs_strerror(-ifindex));
        netdev_close(dev);
        return -ifindex;
    }

    VLOG_DBG_RL(&rl, "flow get (dev %s prio %d handle %d)",
                netdev_get_name(dev), prio, handle);
    err = tc_get_flower(ifindex, prio, handle, &flower);
    if (err) {
        VLOG_ERR_RL(&error_rl, "flow get failed (dev %s prio %d handle %d): %s",
                    netdev_get_name(dev), prio, handle, ovs_strerror(err));
        netdev_close(dev);
        return err;
    }
    netdev_close(dev);

    in_port = netdev_ifindex_to_odp_port(ifindex);
    parse_tc_flower_to_match(&flower, match, actions, stats, buf);

    match->wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
    match->flow.in_port.odp_port = in_port;

    return 0;
}
int
netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
                   const ovs_u128 *ufid,
                   struct dpif_flow_stats *stats)
{
    struct netdev *dev;
    int prio = 0;
    int ifindex;
    int handle;
    int error;

    handle = get_ufid_tc_mapping(ufid, &prio, &dev);
    if (!handle) {
        return ENOENT;
    }

    ifindex = netdev_get_ifindex(dev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(dev), ovs_strerror(-ifindex));
        netdev_close(dev);
        return -ifindex;
    }

    error = tc_del_filter(ifindex, prio, handle);
    del_ufid_tc_mapping(ufid);

    netdev_close(dev);

    if (stats) {
        memset(stats, 0, sizeof *stats);
    }
    return error;
}
int
netdev_tc_init_flow_api(struct netdev *netdev)
{
    int ifindex;
    int error;

    ifindex = netdev_get_ifindex(netdev);
    if (ifindex < 0) {
        VLOG_ERR_RL(&error_rl, "failed to get ifindex for %s: %s",
                    netdev_get_name(netdev), ovs_strerror(-ifindex));
        return -ifindex;
    }

    error = tc_add_del_ingress_qdisc(ifindex, true);

    if (error && error != EEXIST) {
        VLOG_ERR("failed adding ingress qdisc required for offloading: %s",
                 ovs_strerror(error));
        return error;
    }

    VLOG_INFO("added ingress qdisc to %s", netdev_get_name(netdev));

    return 0;
}