/*
 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
 * Copyright (c) 2019 Mellanox Technologies, Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include <rte_flow.h>

#include "cmap.h"
#include "dpif-netdev.h"
#include "netdev-offload-provider.h"
#include "netdev-provider.h"
#include "openvswitch/match.h"
#include "openvswitch/vlog.h"
#include "packets.h"
#include "uuid.h"
VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
/*
 * Below API is NOT thread safe in following terms:
 *
 *  - The caller must be sure that none of these functions will be called
 *    simultaneously.  Even for different 'netdev's.
 *
 *  - The caller must be sure that 'netdev' will not be destructed/deallocated.
 *
 *  - The caller must be sure that 'netdev' configuration will not be changed.
 *    For example, simultaneous call of 'netdev_reconfigure()' for the same
 *    'netdev' is forbidden.
 *
 * For current implementation all above restrictions could be fulfilled by
 * taking the datapath 'port_mutex' in lib/dpif-netdev.c.  */
/*
 * A mapping from ufid to dpdk rte_flow.
 */
52 static struct cmap ufid_to_rte_flow
= CMAP_INITIALIZER
;
54 struct ufid_to_rte_flow_data
{
55 struct cmap_node node
;
57 struct rte_flow
*rte_flow
;
60 /* Find rte_flow with @ufid. */
61 static struct rte_flow
*
62 ufid_to_rte_flow_find(const ovs_u128
*ufid
)
64 size_t hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
65 struct ufid_to_rte_flow_data
*data
;
67 CMAP_FOR_EACH_WITH_HASH (data
, node
, hash
, &ufid_to_rte_flow
) {
68 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
69 return data
->rte_flow
;
77 ufid_to_rte_flow_associate(const ovs_u128
*ufid
,
78 struct rte_flow
*rte_flow
)
80 size_t hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
81 struct ufid_to_rte_flow_data
*data
= xzalloc(sizeof *data
);
84 * We should not simply overwrite an existing rte flow.
85 * We should have deleted it first before re-adding it.
86 * Thus, if following assert triggers, something is wrong:
87 * the rte_flow is not destroyed.
89 ovs_assert(ufid_to_rte_flow_find(ufid
) == NULL
);
92 data
->rte_flow
= rte_flow
;
94 cmap_insert(&ufid_to_rte_flow
,
95 CONST_CAST(struct cmap_node
*, &data
->node
), hash
);
99 ufid_to_rte_flow_disassociate(const ovs_u128
*ufid
)
101 size_t hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
102 struct ufid_to_rte_flow_data
*data
;
104 CMAP_FOR_EACH_WITH_HASH (data
, node
, hash
, &ufid_to_rte_flow
) {
105 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
106 cmap_remove(&ufid_to_rte_flow
,
107 CONST_CAST(struct cmap_node
*, &data
->node
), hash
);
108 ovsrcu_postpone(free
, data
);
113 VLOG_WARN("ufid "UUID_FMT
" is not associated with an rte flow\n",
114 UUID_ARGS((struct uuid
*) ufid
));
/*
 * To avoid individual xrealloc calls for each new element, a 'current_max'
 * is used to keep track of current allocated number of elements. Starts
 * by 8 and doubles on each xrealloc call.
 */
/* Growable array of rte_flow match items. */
struct flow_patterns {
    struct rte_flow_item *items;  /* Array of pattern items. */
    int cnt;                      /* Number of items in use. */
    int current_max;              /* Allocated capacity. */
};
/* Growable array of rte_flow actions. */
struct flow_actions {
    struct rte_flow_action *actions;  /* Array of actions. */
    int cnt;                          /* Number of actions in use. */
    int current_max;                  /* Allocated capacity. */
};
135 dump_flow_pattern(struct rte_flow_item
*item
)
139 if (!VLOG_IS_DBG_ENABLED() || item
->type
== RTE_FLOW_ITEM_TYPE_END
) {
145 if (item
->type
== RTE_FLOW_ITEM_TYPE_ETH
) {
146 const struct rte_flow_item_eth
*eth_spec
= item
->spec
;
147 const struct rte_flow_item_eth
*eth_mask
= item
->mask
;
149 ds_put_cstr(&s
, "rte flow eth pattern:\n");
152 " Spec: src="ETH_ADDR_FMT
", dst="ETH_ADDR_FMT
", "
153 "type=0x%04" PRIx16
"\n",
154 ETH_ADDR_BYTES_ARGS(eth_spec
->src
.addr_bytes
),
155 ETH_ADDR_BYTES_ARGS(eth_spec
->dst
.addr_bytes
),
156 ntohs(eth_spec
->type
));
158 ds_put_cstr(&s
, " Spec = null\n");
162 " Mask: src="ETH_ADDR_FMT
", dst="ETH_ADDR_FMT
", "
163 "type=0x%04"PRIx16
"\n",
164 ETH_ADDR_BYTES_ARGS(eth_mask
->src
.addr_bytes
),
165 ETH_ADDR_BYTES_ARGS(eth_mask
->dst
.addr_bytes
),
166 ntohs(eth_mask
->type
));
168 ds_put_cstr(&s
, " Mask = null\n");
172 if (item
->type
== RTE_FLOW_ITEM_TYPE_VLAN
) {
173 const struct rte_flow_item_vlan
*vlan_spec
= item
->spec
;
174 const struct rte_flow_item_vlan
*vlan_mask
= item
->mask
;
176 ds_put_cstr(&s
, "rte flow vlan pattern:\n");
179 " Spec: inner_type=0x%"PRIx16
", tci=0x%"PRIx16
"\n",
180 ntohs(vlan_spec
->inner_type
), ntohs(vlan_spec
->tci
));
182 ds_put_cstr(&s
, " Spec = null\n");
187 " Mask: inner_type=0x%"PRIx16
", tci=0x%"PRIx16
"\n",
188 ntohs(vlan_mask
->inner_type
), ntohs(vlan_mask
->tci
));
190 ds_put_cstr(&s
, " Mask = null\n");
194 if (item
->type
== RTE_FLOW_ITEM_TYPE_IPV4
) {
195 const struct rte_flow_item_ipv4
*ipv4_spec
= item
->spec
;
196 const struct rte_flow_item_ipv4
*ipv4_mask
= item
->mask
;
198 ds_put_cstr(&s
, "rte flow ipv4 pattern:\n");
201 " Spec: tos=0x%"PRIx8
", ttl=%"PRIx8
203 ", src="IP_FMT
", dst="IP_FMT
"\n",
204 ipv4_spec
->hdr
.type_of_service
,
205 ipv4_spec
->hdr
.time_to_live
,
206 ipv4_spec
->hdr
.next_proto_id
,
207 IP_ARGS(ipv4_spec
->hdr
.src_addr
),
208 IP_ARGS(ipv4_spec
->hdr
.dst_addr
));
210 ds_put_cstr(&s
, " Spec = null\n");
214 " Mask: tos=0x%"PRIx8
", ttl=%"PRIx8
216 ", src="IP_FMT
", dst="IP_FMT
"\n",
217 ipv4_mask
->hdr
.type_of_service
,
218 ipv4_mask
->hdr
.time_to_live
,
219 ipv4_mask
->hdr
.next_proto_id
,
220 IP_ARGS(ipv4_mask
->hdr
.src_addr
),
221 IP_ARGS(ipv4_mask
->hdr
.dst_addr
));
223 ds_put_cstr(&s
, " Mask = null\n");
227 if (item
->type
== RTE_FLOW_ITEM_TYPE_UDP
) {
228 const struct rte_flow_item_udp
*udp_spec
= item
->spec
;
229 const struct rte_flow_item_udp
*udp_mask
= item
->mask
;
231 ds_put_cstr(&s
, "rte flow udp pattern:\n");
234 " Spec: src_port=%"PRIu16
", dst_port=%"PRIu16
"\n",
235 ntohs(udp_spec
->hdr
.src_port
),
236 ntohs(udp_spec
->hdr
.dst_port
));
238 ds_put_cstr(&s
, " Spec = null\n");
242 " Mask: src_port=0x%"PRIx16
243 ", dst_port=0x%"PRIx16
"\n",
244 ntohs(udp_mask
->hdr
.src_port
),
245 ntohs(udp_mask
->hdr
.dst_port
));
247 ds_put_cstr(&s
, " Mask = null\n");
251 if (item
->type
== RTE_FLOW_ITEM_TYPE_SCTP
) {
252 const struct rte_flow_item_sctp
*sctp_spec
= item
->spec
;
253 const struct rte_flow_item_sctp
*sctp_mask
= item
->mask
;
255 ds_put_cstr(&s
, "rte flow sctp pattern:\n");
258 " Spec: src_port=%"PRIu16
", dst_port=%"PRIu16
"\n",
259 ntohs(sctp_spec
->hdr
.src_port
),
260 ntohs(sctp_spec
->hdr
.dst_port
));
262 ds_put_cstr(&s
, " Spec = null\n");
266 " Mask: src_port=0x%"PRIx16
267 ", dst_port=0x%"PRIx16
"\n",
268 ntohs(sctp_mask
->hdr
.src_port
),
269 ntohs(sctp_mask
->hdr
.dst_port
));
271 ds_put_cstr(&s
, " Mask = null\n");
275 if (item
->type
== RTE_FLOW_ITEM_TYPE_ICMP
) {
276 const struct rte_flow_item_icmp
*icmp_spec
= item
->spec
;
277 const struct rte_flow_item_icmp
*icmp_mask
= item
->mask
;
279 ds_put_cstr(&s
, "rte flow icmp pattern:\n");
282 " Spec: icmp_type=%"PRIu8
", icmp_code=%"PRIu8
"\n",
283 icmp_spec
->hdr
.icmp_type
,
284 icmp_spec
->hdr
.icmp_code
);
286 ds_put_cstr(&s
, " Spec = null\n");
290 " Mask: icmp_type=0x%"PRIx8
291 ", icmp_code=0x%"PRIx8
"\n",
292 icmp_spec
->hdr
.icmp_type
,
293 icmp_spec
->hdr
.icmp_code
);
295 ds_put_cstr(&s
, " Mask = null\n");
299 if (item
->type
== RTE_FLOW_ITEM_TYPE_TCP
) {
300 const struct rte_flow_item_tcp
*tcp_spec
= item
->spec
;
301 const struct rte_flow_item_tcp
*tcp_mask
= item
->mask
;
303 ds_put_cstr(&s
, "rte flow tcp pattern:\n");
306 " Spec: src_port=%"PRIu16
", dst_port=%"PRIu16
307 ", data_off=0x%"PRIx8
", tcp_flags=0x%"PRIx8
"\n",
308 ntohs(tcp_spec
->hdr
.src_port
),
309 ntohs(tcp_spec
->hdr
.dst_port
),
310 tcp_spec
->hdr
.data_off
,
311 tcp_spec
->hdr
.tcp_flags
);
313 ds_put_cstr(&s
, " Spec = null\n");
317 " Mask: src_port=%"PRIx16
", dst_port=%"PRIx16
318 ", data_off=0x%"PRIx8
", tcp_flags=0x%"PRIx8
"\n",
319 ntohs(tcp_mask
->hdr
.src_port
),
320 ntohs(tcp_mask
->hdr
.dst_port
),
321 tcp_mask
->hdr
.data_off
,
322 tcp_mask
->hdr
.tcp_flags
);
324 ds_put_cstr(&s
, " Mask = null\n");
328 VLOG_DBG("%s", ds_cstr(&s
));
333 add_flow_pattern(struct flow_patterns
*patterns
, enum rte_flow_item_type type
,
334 const void *spec
, const void *mask
)
336 int cnt
= patterns
->cnt
;
339 patterns
->current_max
= 8;
340 patterns
->items
= xcalloc(patterns
->current_max
,
341 sizeof *patterns
->items
);
342 } else if (cnt
== patterns
->current_max
) {
343 patterns
->current_max
*= 2;
344 patterns
->items
= xrealloc(patterns
->items
, patterns
->current_max
*
345 sizeof *patterns
->items
);
348 patterns
->items
[cnt
].type
= type
;
349 patterns
->items
[cnt
].spec
= spec
;
350 patterns
->items
[cnt
].mask
= mask
;
351 patterns
->items
[cnt
].last
= NULL
;
352 dump_flow_pattern(&patterns
->items
[cnt
]);
357 add_flow_action(struct flow_actions
*actions
, enum rte_flow_action_type type
,
360 int cnt
= actions
->cnt
;
363 actions
->current_max
= 8;
364 actions
->actions
= xcalloc(actions
->current_max
,
365 sizeof *actions
->actions
);
366 } else if (cnt
== actions
->current_max
) {
367 actions
->current_max
*= 2;
368 actions
->actions
= xrealloc(actions
->actions
, actions
->current_max
*
369 sizeof *actions
->actions
);
372 actions
->actions
[cnt
].type
= type
;
373 actions
->actions
[cnt
].conf
= conf
;
377 struct action_rss_data
{
378 struct rte_flow_action_rss conf
;
382 static struct action_rss_data
*
383 add_flow_rss_action(struct flow_actions
*actions
,
384 struct netdev
*netdev
)
387 struct action_rss_data
*rss_data
;
389 rss_data
= xmalloc(sizeof *rss_data
+
390 netdev_n_rxq(netdev
) * sizeof rss_data
->queue
[0]);
391 *rss_data
= (struct action_rss_data
) {
392 .conf
= (struct rte_flow_action_rss
) {
393 .func
= RTE_ETH_HASH_FUNCTION_DEFAULT
,
396 .queue_num
= netdev_n_rxq(netdev
),
397 .queue
= rss_data
->queue
,
403 /* Override queue array with default. */
404 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
405 rss_data
->queue
[i
] = i
;
408 add_flow_action(actions
, RTE_FLOW_ACTION_TYPE_RSS
, &rss_data
->conf
);
414 netdev_offload_dpdk_add_flow(struct netdev
*netdev
,
415 const struct match
*match
,
416 struct nlattr
*nl_actions OVS_UNUSED
,
417 size_t actions_len OVS_UNUSED
,
418 const ovs_u128
*ufid
,
419 struct offload_info
*info
)
421 const struct rte_flow_attr flow_attr
= {
427 struct flow_patterns patterns
= { .items
= NULL
, .cnt
= 0 };
428 struct flow_actions actions
= { .actions
= NULL
, .cnt
= 0 };
429 struct rte_flow
*flow
;
430 struct rte_flow_error error
;
434 struct rte_flow_item_eth eth
;
435 struct rte_flow_item_vlan vlan
;
436 struct rte_flow_item_ipv4 ipv4
;
438 struct rte_flow_item_tcp tcp
;
439 struct rte_flow_item_udp udp
;
440 struct rte_flow_item_sctp sctp
;
441 struct rte_flow_item_icmp icmp
;
445 memset(&spec
, 0, sizeof spec
);
446 memset(&mask
, 0, sizeof mask
);
449 if (!eth_addr_is_zero(match
->wc
.masks
.dl_src
) ||
450 !eth_addr_is_zero(match
->wc
.masks
.dl_dst
)) {
451 memcpy(&spec
.eth
.dst
, &match
->flow
.dl_dst
, sizeof spec
.eth
.dst
);
452 memcpy(&spec
.eth
.src
, &match
->flow
.dl_src
, sizeof spec
.eth
.src
);
453 spec
.eth
.type
= match
->flow
.dl_type
;
455 memcpy(&mask
.eth
.dst
, &match
->wc
.masks
.dl_dst
, sizeof mask
.eth
.dst
);
456 memcpy(&mask
.eth
.src
, &match
->wc
.masks
.dl_src
, sizeof mask
.eth
.src
);
457 mask
.eth
.type
= match
->wc
.masks
.dl_type
;
459 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_ETH
,
460 &spec
.eth
, &mask
.eth
);
463 * If user specifies a flow (like UDP flow) without L2 patterns,
464 * OVS will at least set the dl_type. Normally, it's enough to
465 * create an eth pattern just with it. Unluckily, some Intel's
466 * NIC (such as XL710) doesn't support that. Below is a workaround,
467 * which simply matches any L2 pkts.
469 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_ETH
, NULL
, NULL
);
473 if (match
->wc
.masks
.vlans
[0].tci
&& match
->flow
.vlans
[0].tci
) {
474 spec
.vlan
.tci
= match
->flow
.vlans
[0].tci
& ~htons(VLAN_CFI
);
475 mask
.vlan
.tci
= match
->wc
.masks
.vlans
[0].tci
& ~htons(VLAN_CFI
);
477 /* Match any protocols. */
478 mask
.vlan
.inner_type
= 0;
480 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_VLAN
,
481 &spec
.vlan
, &mask
.vlan
);
485 if (match
->flow
.dl_type
== htons(ETH_TYPE_IP
)) {
486 spec
.ipv4
.hdr
.type_of_service
= match
->flow
.nw_tos
;
487 spec
.ipv4
.hdr
.time_to_live
= match
->flow
.nw_ttl
;
488 spec
.ipv4
.hdr
.next_proto_id
= match
->flow
.nw_proto
;
489 spec
.ipv4
.hdr
.src_addr
= match
->flow
.nw_src
;
490 spec
.ipv4
.hdr
.dst_addr
= match
->flow
.nw_dst
;
492 mask
.ipv4
.hdr
.type_of_service
= match
->wc
.masks
.nw_tos
;
493 mask
.ipv4
.hdr
.time_to_live
= match
->wc
.masks
.nw_ttl
;
494 mask
.ipv4
.hdr
.next_proto_id
= match
->wc
.masks
.nw_proto
;
495 mask
.ipv4
.hdr
.src_addr
= match
->wc
.masks
.nw_src
;
496 mask
.ipv4
.hdr
.dst_addr
= match
->wc
.masks
.nw_dst
;
498 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_IPV4
,
499 &spec
.ipv4
, &mask
.ipv4
);
501 /* Save proto for L4 protocol setup. */
502 proto
= spec
.ipv4
.hdr
.next_proto_id
&
503 mask
.ipv4
.hdr
.next_proto_id
;
506 if (proto
!= IPPROTO_ICMP
&& proto
!= IPPROTO_UDP
&&
507 proto
!= IPPROTO_SCTP
&& proto
!= IPPROTO_TCP
&&
508 (match
->wc
.masks
.tp_src
||
509 match
->wc
.masks
.tp_dst
||
510 match
->wc
.masks
.tcp_flags
)) {
511 VLOG_DBG("L4 Protocol (%u) not supported", proto
);
516 if ((match
->wc
.masks
.tp_src
&& match
->wc
.masks
.tp_src
!= OVS_BE16_MAX
) ||
517 (match
->wc
.masks
.tp_dst
&& match
->wc
.masks
.tp_dst
!= OVS_BE16_MAX
)) {
524 spec
.tcp
.hdr
.src_port
= match
->flow
.tp_src
;
525 spec
.tcp
.hdr
.dst_port
= match
->flow
.tp_dst
;
526 spec
.tcp
.hdr
.data_off
= ntohs(match
->flow
.tcp_flags
) >> 8;
527 spec
.tcp
.hdr
.tcp_flags
= ntohs(match
->flow
.tcp_flags
) & 0xff;
529 mask
.tcp
.hdr
.src_port
= match
->wc
.masks
.tp_src
;
530 mask
.tcp
.hdr
.dst_port
= match
->wc
.masks
.tp_dst
;
531 mask
.tcp
.hdr
.data_off
= ntohs(match
->wc
.masks
.tcp_flags
) >> 8;
532 mask
.tcp
.hdr
.tcp_flags
= ntohs(match
->wc
.masks
.tcp_flags
) & 0xff;
534 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_TCP
,
535 &spec
.tcp
, &mask
.tcp
);
537 /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match. */
538 mask
.ipv4
.hdr
.next_proto_id
= 0;
542 spec
.udp
.hdr
.src_port
= match
->flow
.tp_src
;
543 spec
.udp
.hdr
.dst_port
= match
->flow
.tp_dst
;
545 mask
.udp
.hdr
.src_port
= match
->wc
.masks
.tp_src
;
546 mask
.udp
.hdr
.dst_port
= match
->wc
.masks
.tp_dst
;
548 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_UDP
,
549 &spec
.udp
, &mask
.udp
);
551 /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match. */
552 mask
.ipv4
.hdr
.next_proto_id
= 0;
556 spec
.sctp
.hdr
.src_port
= match
->flow
.tp_src
;
557 spec
.sctp
.hdr
.dst_port
= match
->flow
.tp_dst
;
559 mask
.sctp
.hdr
.src_port
= match
->wc
.masks
.tp_src
;
560 mask
.sctp
.hdr
.dst_port
= match
->wc
.masks
.tp_dst
;
562 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_SCTP
,
563 &spec
.sctp
, &mask
.sctp
);
565 /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match. */
566 mask
.ipv4
.hdr
.next_proto_id
= 0;
570 spec
.icmp
.hdr
.icmp_type
= (uint8_t) ntohs(match
->flow
.tp_src
);
571 spec
.icmp
.hdr
.icmp_code
= (uint8_t) ntohs(match
->flow
.tp_dst
);
573 mask
.icmp
.hdr
.icmp_type
= (uint8_t) ntohs(match
->wc
.masks
.tp_src
);
574 mask
.icmp
.hdr
.icmp_code
= (uint8_t) ntohs(match
->wc
.masks
.tp_dst
);
576 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_ICMP
,
577 &spec
.icmp
, &mask
.icmp
);
579 /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match. */
580 mask
.ipv4
.hdr
.next_proto_id
= 0;
584 add_flow_pattern(&patterns
, RTE_FLOW_ITEM_TYPE_END
, NULL
, NULL
);
586 struct rte_flow_action_mark mark
;
587 struct action_rss_data
*rss
;
589 mark
.id
= info
->flow_mark
;
590 add_flow_action(&actions
, RTE_FLOW_ACTION_TYPE_MARK
, &mark
);
592 rss
= add_flow_rss_action(&actions
, netdev
);
593 add_flow_action(&actions
, RTE_FLOW_ACTION_TYPE_END
, NULL
);
595 flow
= netdev_dpdk_rte_flow_create(netdev
, &flow_attr
,
597 actions
.actions
, &error
);
601 VLOG_ERR("%s: rte flow creat error: %u : message : %s\n",
602 netdev_get_name(netdev
), error
.type
, error
.message
);
606 ufid_to_rte_flow_associate(ufid
, flow
);
607 VLOG_DBG("%s: installed flow %p by ufid "UUID_FMT
"\n",
608 netdev_get_name(netdev
), flow
, UUID_ARGS((struct uuid
*)ufid
));
611 free(patterns
.items
);
612 free(actions
.actions
);
617 * Check if any unsupported flow patterns are specified.
620 netdev_offload_dpdk_validate_flow(const struct match
*match
)
622 struct match match_zero_wc
;
623 const struct flow
*masks
= &match
->wc
.masks
;
625 /* Create a wc-zeroed version of flow. */
626 match_init(&match_zero_wc
, &match
->flow
, &match
->wc
);
628 if (!is_all_zeros(&match_zero_wc
.flow
.tunnel
,
629 sizeof match_zero_wc
.flow
.tunnel
)) {
633 if (masks
->metadata
|| masks
->skb_priority
||
634 masks
->pkt_mark
|| masks
->dp_hash
) {
638 /* recirc id must be zero. */
639 if (match_zero_wc
.flow
.recirc_id
) {
643 if (masks
->ct_state
|| masks
->ct_nw_proto
||
644 masks
->ct_zone
|| masks
->ct_mark
||
645 !ovs_u128_is_zero(masks
->ct_label
)) {
649 if (masks
->conj_id
|| masks
->actset_output
) {
653 /* Unsupported L2. */
654 if (!is_all_zeros(masks
->mpls_lse
, sizeof masks
->mpls_lse
)) {
658 /* Unsupported L3. */
659 if (masks
->ipv6_label
|| masks
->ct_nw_src
|| masks
->ct_nw_dst
||
660 !is_all_zeros(&masks
->ipv6_src
, sizeof masks
->ipv6_src
) ||
661 !is_all_zeros(&masks
->ipv6_dst
, sizeof masks
->ipv6_dst
) ||
662 !is_all_zeros(&masks
->ct_ipv6_src
, sizeof masks
->ct_ipv6_src
) ||
663 !is_all_zeros(&masks
->ct_ipv6_dst
, sizeof masks
->ct_ipv6_dst
) ||
664 !is_all_zeros(&masks
->nd_target
, sizeof masks
->nd_target
) ||
665 !is_all_zeros(&masks
->nsh
, sizeof masks
->nsh
) ||
666 !is_all_zeros(&masks
->arp_sha
, sizeof masks
->arp_sha
) ||
667 !is_all_zeros(&masks
->arp_tha
, sizeof masks
->arp_tha
)) {
671 /* If fragmented, then don't HW accelerate - for now. */
672 if (match_zero_wc
.flow
.nw_frag
) {
676 /* Unsupported L4. */
677 if (masks
->igmp_group_ip4
|| masks
->ct_tp_src
|| masks
->ct_tp_dst
) {
684 VLOG_ERR("cannot HW accelerate this flow due to unsupported protocols");
689 netdev_offload_dpdk_destroy_flow(struct netdev
*netdev
,
690 const ovs_u128
*ufid
,
691 struct rte_flow
*rte_flow
)
693 struct rte_flow_error error
;
694 int ret
= netdev_dpdk_rte_flow_destroy(netdev
, rte_flow
, &error
);
697 ufid_to_rte_flow_disassociate(ufid
);
698 VLOG_DBG("%s: removed rte flow %p associated with ufid " UUID_FMT
"\n",
699 netdev_get_name(netdev
), rte_flow
,
700 UUID_ARGS((struct uuid
*)ufid
));
702 VLOG_ERR("%s: rte flow destroy error: %u : message : %s\n",
703 netdev_get_name(netdev
), error
.type
, error
.message
);
710 netdev_offload_dpdk_flow_put(struct netdev
*netdev
, struct match
*match
,
711 struct nlattr
*actions
, size_t actions_len
,
712 const ovs_u128
*ufid
, struct offload_info
*info
,
713 struct dpif_flow_stats
*stats OVS_UNUSED
)
715 struct rte_flow
*rte_flow
;
719 * If an old rte_flow exists, it means it's a flow modification.
720 * Here destroy the old rte flow first before adding a new one.
722 rte_flow
= ufid_to_rte_flow_find(ufid
);
724 ret
= netdev_offload_dpdk_destroy_flow(netdev
, ufid
, rte_flow
);
730 ret
= netdev_offload_dpdk_validate_flow(match
);
735 return netdev_offload_dpdk_add_flow(netdev
, match
, actions
,
736 actions_len
, ufid
, info
);
740 netdev_offload_dpdk_flow_del(struct netdev
*netdev
, const ovs_u128
*ufid
,
741 struct dpif_flow_stats
*stats OVS_UNUSED
)
743 struct rte_flow
*rte_flow
= ufid_to_rte_flow_find(ufid
);
749 return netdev_offload_dpdk_destroy_flow(netdev
, ufid
, rte_flow
);
/* netdev_flow_api 'init_flow_api' implementation: report EOPNOTSUPP for
 * netdevs without DPDK rte_flow support. */
static int
netdev_offload_dpdk_init_flow_api(struct netdev *netdev)
{
    return netdev_dpdk_flow_api_supported(netdev) ? 0 : EOPNOTSUPP;
}
758 const struct netdev_flow_api netdev_offload_dpdk
= {
759 .type
= "dpdk_flow_api",
760 .flow_put
= netdev_offload_dpdk_flow_put
,
761 .flow_del
= netdev_offload_dpdk_flow_del
,
762 .init_flow_api
= netdev_offload_dpdk_init_flow_api
,