/*
 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
 * Copyright (c) 2019 Mellanox Technologies, Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
22 #include "dpif-netdev.h"
23 #include "netdev-offload-provider.h"
24 #include "netdev-provider.h"
25 #include "openvswitch/match.h"
26 #include "openvswitch/vlog.h"
/* Register this file's log module name with the OVS vlog subsystem. */
VLOG_DEFINE_THIS_MODULE(netdev_offload_dpdk);
/*
 * Below API is NOT thread safe in following terms:
 *
 * - The caller must be sure that none of these functions will be called
 *   simultaneously.  Even for different 'netdev's.
 *
 * - The caller must be sure that 'netdev' will not be destructed/deallocated.
 *
 * - The caller must be sure that 'netdev' configuration will not be changed.
 *   For example, simultaneous call of 'netdev_reconfigure()' for the same
 *   'netdev' is forbidden.
 *
 * For current implementation all above restrictions could be fulfilled by
 * taking the datapath 'port_mutex' in lib/dpif-netdev.c. */
50 * A mapping from ufid to dpdk rte_flow.
52 static struct cmap ufid_to_rte_flow
= CMAP_INITIALIZER
;
54 struct ufid_to_rte_flow_data
{
55 struct cmap_node node
;
57 struct rte_flow
*rte_flow
;
60 /* Find rte_flow with @ufid. */
61 static struct rte_flow
*
62 ufid_to_rte_flow_find(const ovs_u128
*ufid
)
64 size_t hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
65 struct ufid_to_rte_flow_data
*data
;
67 CMAP_FOR_EACH_WITH_HASH (data
, node
, hash
, &ufid_to_rte_flow
) {
68 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
69 return data
->rte_flow
;
77 ufid_to_rte_flow_associate(const ovs_u128
*ufid
,
78 struct rte_flow
*rte_flow
)
80 size_t hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
81 struct ufid_to_rte_flow_data
*data
= xzalloc(sizeof *data
);
84 * We should not simply overwrite an existing rte flow.
85 * We should have deleted it first before re-adding it.
86 * Thus, if following assert triggers, something is wrong:
87 * the rte_flow is not destroyed.
89 ovs_assert(ufid_to_rte_flow_find(ufid
) == NULL
);
92 data
->rte_flow
= rte_flow
;
94 cmap_insert(&ufid_to_rte_flow
,
95 CONST_CAST(struct cmap_node
*, &data
->node
), hash
);
99 ufid_to_rte_flow_disassociate(const ovs_u128
*ufid
)
101 size_t hash
= hash_bytes(ufid
, sizeof *ufid
, 0);
102 struct ufid_to_rte_flow_data
*data
;
104 CMAP_FOR_EACH_WITH_HASH (data
, node
, hash
, &ufid_to_rte_flow
) {
105 if (ovs_u128_equals(*ufid
, data
->ufid
)) {
106 cmap_remove(&ufid_to_rte_flow
,
107 CONST_CAST(struct cmap_node
*, &data
->node
), hash
);
108 ovsrcu_postpone(free
, data
);
113 VLOG_WARN("ufid "UUID_FMT
" is not associated with an rte flow\n",
114 UUID_ARGS((struct uuid
*) ufid
));
/*
 * To avoid individual xrealloc calls for each new element, a 'current_max'
 * is used to keep track of current allocated number of elements. Starts
 * by 8 and doubles on each xrealloc call.
 */
struct flow_patterns {
    struct rte_flow_item *items;  /* Growable array of match items. */
    int cnt;                      /* Number of items in use. */
    int current_max;              /* Allocated capacity of 'items'. */
};

struct flow_actions {
    struct rte_flow_action *actions;  /* Growable array of flow actions. */
    int cnt;                          /* Number of actions in use. */
    int current_max;                  /* Allocated capacity of 'actions'. */
};
135 dump_flow_pattern(struct rte_flow_item
*item
)
139 if (!VLOG_IS_DBG_ENABLED() || item
->type
== RTE_FLOW_ITEM_TYPE_END
) {
145 if (item
->type
== RTE_FLOW_ITEM_TYPE_ETH
) {
146 const struct rte_flow_item_eth
*eth_spec
= item
->spec
;
147 const struct rte_flow_item_eth
*eth_mask
= item
->mask
;
149 ds_put_cstr(&s
, "rte flow eth pattern:\n");
152 " Spec: src="ETH_ADDR_FMT
", dst="ETH_ADDR_FMT
", "
153 "type=0x%04" PRIx16
"\n",
154 ETH_ADDR_BYTES_ARGS(eth_spec
->src
.addr_bytes
),
155 ETH_ADDR_BYTES_ARGS(eth_spec
->dst
.addr_bytes
),
156 ntohs(eth_spec
->type
));
158 ds_put_cstr(&s
, " Spec = null\n");
162 " Mask: src="ETH_ADDR_FMT
", dst="ETH_ADDR_FMT
", "
163 "type=0x%04"PRIx16
"\n",
164 ETH_ADDR_BYTES_ARGS(eth_mask
->src
.addr_bytes
),
165 ETH_ADDR_BYTES_ARGS(eth_mask
->dst
.addr_bytes
),
166 ntohs(eth_mask
->type
));
168 ds_put_cstr(&s
, " Mask = null\n");
172 if (item
->type
== RTE_FLOW_ITEM_TYPE_VLAN
) {
173 const struct rte_flow_item_vlan
*vlan_spec
= item
->spec
;
174 const struct rte_flow_item_vlan
*vlan_mask
= item
->mask
;
176 ds_put_cstr(&s
, "rte flow vlan pattern:\n");
179 " Spec: inner_type=0x%"PRIx16
", tci=0x%"PRIx16
"\n",
180 ntohs(vlan_spec
->inner_type
), ntohs(vlan_spec
->tci
));
182 ds_put_cstr(&s
, " Spec = null\n");
187 " Mask: inner_type=0x%"PRIx16
", tci=0x%"PRIx16
"\n",
188 ntohs(vlan_mask
->inner_type
), ntohs(vlan_mask
->tci
));
190 ds_put_cstr(&s
, " Mask = null\n");
194 if (item
->type
== RTE_FLOW_ITEM_TYPE_IPV4
) {
195 const struct rte_flow_item_ipv4
*ipv4_spec
= item
->spec
;
196 const struct rte_flow_item_ipv4
*ipv4_mask
= item
->mask
;
198 ds_put_cstr(&s
, "rte flow ipv4 pattern:\n");
201 " Spec: tos=0x%"PRIx8
", ttl=%"PRIx8
203 ", src="IP_FMT
", dst="IP_FMT
"\n",
204 ipv4_spec
->hdr
.type_of_service
,
205 ipv4_spec
->hdr
.time_to_live
,
206 ipv4_spec
->hdr
.next_proto_id
,
207 IP_ARGS(ipv4_spec
->hdr
.src_addr
),
208 IP_ARGS(ipv4_spec
->hdr
.dst_addr
));
210 ds_put_cstr(&s
, " Spec = null\n");
214 " Mask: tos=0x%"PRIx8
", ttl=%"PRIx8
216 ", src="IP_FMT
", dst="IP_FMT
"\n",
217 ipv4_mask
->hdr
.type_of_service
,
218 ipv4_mask
->hdr
.time_to_live
,
219 ipv4_mask
->hdr
.next_proto_id
,
220 IP_ARGS(ipv4_mask
->hdr
.src_addr
),
221 IP_ARGS(ipv4_mask
->hdr
.dst_addr
));
223 ds_put_cstr(&s
, " Mask = null\n");
227 if (item
->type
== RTE_FLOW_ITEM_TYPE_UDP
) {
228 const struct rte_flow_item_udp
*udp_spec
= item
->spec
;
229 const struct rte_flow_item_udp
*udp_mask
= item
->mask
;
231 ds_put_cstr(&s
, "rte flow udp pattern:\n");
234 " Spec: src_port=%"PRIu16
", dst_port=%"PRIu16
"\n",
235 ntohs(udp_spec
->hdr
.src_port
),
236 ntohs(udp_spec
->hdr
.dst_port
));
238 ds_put_cstr(&s
, " Spec = null\n");
242 " Mask: src_port=0x%"PRIx16
243 ", dst_port=0x%"PRIx16
"\n",
244 ntohs(udp_mask
->hdr
.src_port
),
245 ntohs(udp_mask
->hdr
.dst_port
));
247 ds_put_cstr(&s
, " Mask = null\n");
251 if (item
->type
== RTE_FLOW_ITEM_TYPE_SCTP
) {
252 const struct rte_flow_item_sctp
*sctp_spec
= item
->spec
;
253 const struct rte_flow_item_sctp
*sctp_mask
= item
->mask
;
255 ds_put_cstr(&s
, "rte flow sctp pattern:\n");
258 " Spec: src_port=%"PRIu16
", dst_port=%"PRIu16
"\n",
259 ntohs(sctp_spec
->hdr
.src_port
),
260 ntohs(sctp_spec
->hdr
.dst_port
));
262 ds_put_cstr(&s
, " Spec = null\n");
266 " Mask: src_port=0x%"PRIx16
267 ", dst_port=0x%"PRIx16
"\n",
268 ntohs(sctp_mask
->hdr
.src_port
),
269 ntohs(sctp_mask
->hdr
.dst_port
));
271 ds_put_cstr(&s
, " Mask = null\n");
275 if (item
->type
== RTE_FLOW_ITEM_TYPE_ICMP
) {
276 const struct rte_flow_item_icmp
*icmp_spec
= item
->spec
;
277 const struct rte_flow_item_icmp
*icmp_mask
= item
->mask
;
279 ds_put_cstr(&s
, "rte flow icmp pattern:\n");
282 " Spec: icmp_type=%"PRIu8
", icmp_code=%"PRIu8
"\n",
283 icmp_spec
->hdr
.icmp_type
,
284 icmp_spec
->hdr
.icmp_code
);
286 ds_put_cstr(&s
, " Spec = null\n");
290 " Mask: icmp_type=0x%"PRIx8
291 ", icmp_code=0x%"PRIx8
"\n",
292 icmp_spec
->hdr
.icmp_type
,
293 icmp_spec
->hdr
.icmp_code
);
295 ds_put_cstr(&s
, " Mask = null\n");
299 if (item
->type
== RTE_FLOW_ITEM_TYPE_TCP
) {
300 const struct rte_flow_item_tcp
*tcp_spec
= item
->spec
;
301 const struct rte_flow_item_tcp
*tcp_mask
= item
->mask
;
303 ds_put_cstr(&s
, "rte flow tcp pattern:\n");
306 " Spec: src_port=%"PRIu16
", dst_port=%"PRIu16
307 ", data_off=0x%"PRIx8
", tcp_flags=0x%"PRIx8
"\n",
308 ntohs(tcp_spec
->hdr
.src_port
),
309 ntohs(tcp_spec
->hdr
.dst_port
),
310 tcp_spec
->hdr
.data_off
,
311 tcp_spec
->hdr
.tcp_flags
);
313 ds_put_cstr(&s
, " Spec = null\n");
317 " Mask: src_port=%"PRIx16
", dst_port=%"PRIx16
318 ", data_off=0x%"PRIx8
", tcp_flags=0x%"PRIx8
"\n",
319 ntohs(tcp_mask
->hdr
.src_port
),
320 ntohs(tcp_mask
->hdr
.dst_port
),
321 tcp_mask
->hdr
.data_off
,
322 tcp_mask
->hdr
.tcp_flags
);
324 ds_put_cstr(&s
, " Mask = null\n");
328 VLOG_DBG("%s", ds_cstr(&s
));
333 add_flow_pattern(struct flow_patterns
*patterns
, enum rte_flow_item_type type
,
334 const void *spec
, const void *mask
)
336 int cnt
= patterns
->cnt
;
339 patterns
->current_max
= 8;
340 patterns
->items
= xcalloc(patterns
->current_max
,
341 sizeof *patterns
->items
);
342 } else if (cnt
== patterns
->current_max
) {
343 patterns
->current_max
*= 2;
344 patterns
->items
= xrealloc(patterns
->items
, patterns
->current_max
*
345 sizeof *patterns
->items
);
348 patterns
->items
[cnt
].type
= type
;
349 patterns
->items
[cnt
].spec
= spec
;
350 patterns
->items
[cnt
].mask
= mask
;
351 patterns
->items
[cnt
].last
= NULL
;
352 dump_flow_pattern(&patterns
->items
[cnt
]);
357 add_flow_action(struct flow_actions
*actions
, enum rte_flow_action_type type
,
360 int cnt
= actions
->cnt
;
363 actions
->current_max
= 8;
364 actions
->actions
= xcalloc(actions
->current_max
,
365 sizeof *actions
->actions
);
366 } else if (cnt
== actions
->current_max
) {
367 actions
->current_max
*= 2;
368 actions
->actions
= xrealloc(actions
->actions
, actions
->current_max
*
369 sizeof *actions
->actions
);
372 actions
->actions
[cnt
].type
= type
;
373 actions
->actions
[cnt
].conf
= conf
;
378 free_flow_actions(struct flow_actions
*actions
)
382 for (i
= 0; i
< actions
->cnt
; i
++) {
383 if (actions
->actions
[i
].conf
) {
384 free(CONST_CAST(void *, actions
->actions
[i
].conf
));
387 free(actions
->actions
);
388 actions
->actions
= NULL
;
393 struct rte_flow_item_eth eth
;
394 struct rte_flow_item_vlan vlan
;
395 struct rte_flow_item_ipv4 ipv4
;
397 struct rte_flow_item_tcp tcp
;
398 struct rte_flow_item_udp udp
;
399 struct rte_flow_item_sctp sctp
;
400 struct rte_flow_item_icmp icmp
;
405 parse_flow_match(struct flow_patterns
*patterns
,
406 struct flow_items
*spec
,
407 struct flow_items
*mask
,
408 const struct match
*match
)
413 if (!eth_addr_is_zero(match
->wc
.masks
.dl_src
) ||
414 !eth_addr_is_zero(match
->wc
.masks
.dl_dst
)) {
415 memcpy(&spec
->eth
.dst
, &match
->flow
.dl_dst
, sizeof spec
->eth
.dst
);
416 memcpy(&spec
->eth
.src
, &match
->flow
.dl_src
, sizeof spec
->eth
.src
);
417 spec
->eth
.type
= match
->flow
.dl_type
;
419 memcpy(&mask
->eth
.dst
, &match
->wc
.masks
.dl_dst
, sizeof mask
->eth
.dst
);
420 memcpy(&mask
->eth
.src
, &match
->wc
.masks
.dl_src
, sizeof mask
->eth
.src
);
421 mask
->eth
.type
= match
->wc
.masks
.dl_type
;
423 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_ETH
,
424 &spec
->eth
, &mask
->eth
);
427 * If user specifies a flow (like UDP flow) without L2 patterns,
428 * OVS will at least set the dl_type. Normally, it's enough to
429 * create an eth pattern just with it. Unluckily, some Intel's
430 * NIC (such as XL710) doesn't support that. Below is a workaround,
431 * which simply matches any L2 pkts.
433 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_ETH
, NULL
, NULL
);
437 if (match
->wc
.masks
.vlans
[0].tci
&& match
->flow
.vlans
[0].tci
) {
438 spec
->vlan
.tci
= match
->flow
.vlans
[0].tci
& ~htons(VLAN_CFI
);
439 mask
->vlan
.tci
= match
->wc
.masks
.vlans
[0].tci
& ~htons(VLAN_CFI
);
441 /* Match any protocols. */
442 mask
->vlan
.inner_type
= 0;
444 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_VLAN
,
445 &spec
->vlan
, &mask
->vlan
);
449 if (match
->flow
.dl_type
== htons(ETH_TYPE_IP
)) {
450 spec
->ipv4
.hdr
.type_of_service
= match
->flow
.nw_tos
;
451 spec
->ipv4
.hdr
.time_to_live
= match
->flow
.nw_ttl
;
452 spec
->ipv4
.hdr
.next_proto_id
= match
->flow
.nw_proto
;
453 spec
->ipv4
.hdr
.src_addr
= match
->flow
.nw_src
;
454 spec
->ipv4
.hdr
.dst_addr
= match
->flow
.nw_dst
;
456 mask
->ipv4
.hdr
.type_of_service
= match
->wc
.masks
.nw_tos
;
457 mask
->ipv4
.hdr
.time_to_live
= match
->wc
.masks
.nw_ttl
;
458 mask
->ipv4
.hdr
.next_proto_id
= match
->wc
.masks
.nw_proto
;
459 mask
->ipv4
.hdr
.src_addr
= match
->wc
.masks
.nw_src
;
460 mask
->ipv4
.hdr
.dst_addr
= match
->wc
.masks
.nw_dst
;
462 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_IPV4
,
463 &spec
->ipv4
, &mask
->ipv4
);
465 /* Save proto for L4 protocol setup. */
466 proto
= spec
->ipv4
.hdr
.next_proto_id
&
467 mask
->ipv4
.hdr
.next_proto_id
;
470 if (proto
!= IPPROTO_ICMP
&& proto
!= IPPROTO_UDP
&&
471 proto
!= IPPROTO_SCTP
&& proto
!= IPPROTO_TCP
&&
472 (match
->wc
.masks
.tp_src
||
473 match
->wc
.masks
.tp_dst
||
474 match
->wc
.masks
.tcp_flags
)) {
475 VLOG_DBG("L4 Protocol (%u) not supported", proto
);
479 if ((match
->wc
.masks
.tp_src
&& match
->wc
.masks
.tp_src
!= OVS_BE16_MAX
) ||
480 (match
->wc
.masks
.tp_dst
&& match
->wc
.masks
.tp_dst
!= OVS_BE16_MAX
)) {
486 spec
->tcp
.hdr
.src_port
= match
->flow
.tp_src
;
487 spec
->tcp
.hdr
.dst_port
= match
->flow
.tp_dst
;
488 spec
->tcp
.hdr
.data_off
= ntohs(match
->flow
.tcp_flags
) >> 8;
489 spec
->tcp
.hdr
.tcp_flags
= ntohs(match
->flow
.tcp_flags
) & 0xff;
491 mask
->tcp
.hdr
.src_port
= match
->wc
.masks
.tp_src
;
492 mask
->tcp
.hdr
.dst_port
= match
->wc
.masks
.tp_dst
;
493 mask
->tcp
.hdr
.data_off
= ntohs(match
->wc
.masks
.tcp_flags
) >> 8;
494 mask
->tcp
.hdr
.tcp_flags
= ntohs(match
->wc
.masks
.tcp_flags
) & 0xff;
496 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_TCP
,
497 &spec
->tcp
, &mask
->tcp
);
499 /* proto == TCP and ITEM_TYPE_TCP, thus no need for proto match. */
500 mask
->ipv4
.hdr
.next_proto_id
= 0;
504 spec
->udp
.hdr
.src_port
= match
->flow
.tp_src
;
505 spec
->udp
.hdr
.dst_port
= match
->flow
.tp_dst
;
507 mask
->udp
.hdr
.src_port
= match
->wc
.masks
.tp_src
;
508 mask
->udp
.hdr
.dst_port
= match
->wc
.masks
.tp_dst
;
510 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_UDP
,
511 &spec
->udp
, &mask
->udp
);
513 /* proto == UDP and ITEM_TYPE_UDP, thus no need for proto match. */
514 mask
->ipv4
.hdr
.next_proto_id
= 0;
518 spec
->sctp
.hdr
.src_port
= match
->flow
.tp_src
;
519 spec
->sctp
.hdr
.dst_port
= match
->flow
.tp_dst
;
521 mask
->sctp
.hdr
.src_port
= match
->wc
.masks
.tp_src
;
522 mask
->sctp
.hdr
.dst_port
= match
->wc
.masks
.tp_dst
;
524 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_SCTP
,
525 &spec
->sctp
, &mask
->sctp
);
527 /* proto == SCTP and ITEM_TYPE_SCTP, thus no need for proto match. */
528 mask
->ipv4
.hdr
.next_proto_id
= 0;
532 spec
->icmp
.hdr
.icmp_type
= (uint8_t) ntohs(match
->flow
.tp_src
);
533 spec
->icmp
.hdr
.icmp_code
= (uint8_t) ntohs(match
->flow
.tp_dst
);
535 mask
->icmp
.hdr
.icmp_type
= (uint8_t) ntohs(match
->wc
.masks
.tp_src
);
536 mask
->icmp
.hdr
.icmp_code
= (uint8_t) ntohs(match
->wc
.masks
.tp_dst
);
538 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_ICMP
,
539 &spec
->icmp
, &mask
->icmp
);
541 /* proto == ICMP and ITEM_TYPE_ICMP, thus no need for proto match. */
542 mask
->ipv4
.hdr
.next_proto_id
= 0;
546 add_flow_pattern(patterns
, RTE_FLOW_ITEM_TYPE_END
, NULL
, NULL
);
552 add_flow_mark_rss_actions(struct flow_actions
*actions
,
554 const struct netdev
*netdev
)
556 struct rte_flow_action_mark
*mark
;
557 struct action_rss_data
{
558 struct rte_flow_action_rss conf
;
561 BUILD_ASSERT_DECL(offsetof(struct action_rss_data
, conf
) == 0);
564 mark
= xzalloc(sizeof *mark
);
566 mark
->id
= flow_mark
;
567 add_flow_action(actions
, RTE_FLOW_ACTION_TYPE_MARK
, mark
);
569 rss_data
= xmalloc(sizeof *rss_data
+
570 netdev_n_rxq(netdev
) * sizeof rss_data
->queue
[0]);
571 *rss_data
= (struct action_rss_data
) {
572 .conf
= (struct rte_flow_action_rss
) {
573 .func
= RTE_ETH_HASH_FUNCTION_DEFAULT
,
576 .queue_num
= netdev_n_rxq(netdev
),
577 .queue
= rss_data
->queue
,
583 /* Override queue array with default. */
584 for (i
= 0; i
< netdev_n_rxq(netdev
); i
++) {
585 rss_data
->queue
[i
] = i
;
588 add_flow_action(actions
, RTE_FLOW_ACTION_TYPE_RSS
, &rss_data
->conf
);
589 add_flow_action(actions
, RTE_FLOW_ACTION_TYPE_END
, NULL
);
593 netdev_offload_dpdk_add_flow(struct netdev
*netdev
,
594 const struct match
*match
,
595 struct nlattr
*nl_actions OVS_UNUSED
,
596 size_t actions_len OVS_UNUSED
,
597 const ovs_u128
*ufid
,
598 struct offload_info
*info
)
600 const struct rte_flow_attr flow_attr
= {
606 struct flow_patterns patterns
= { .items
= NULL
, .cnt
= 0 };
607 struct flow_actions actions
= { .actions
= NULL
, .cnt
= 0 };
608 struct rte_flow
*flow
;
609 struct rte_flow_error error
;
611 struct flow_items spec
, mask
;
613 memset(&spec
, 0, sizeof spec
);
614 memset(&mask
, 0, sizeof mask
);
616 ret
= parse_flow_match(&patterns
, &spec
, &mask
, match
);
621 add_flow_mark_rss_actions(&actions
, info
->flow_mark
, netdev
);
623 flow
= netdev_dpdk_rte_flow_create(netdev
, &flow_attr
,
625 actions
.actions
, &error
);
628 VLOG_ERR("%s: rte flow creat error: %u : message : %s\n",
629 netdev_get_name(netdev
), error
.type
, error
.message
);
633 ufid_to_rte_flow_associate(ufid
, flow
);
634 VLOG_DBG("%s: installed flow %p by ufid "UUID_FMT
"\n",
635 netdev_get_name(netdev
), flow
, UUID_ARGS((struct uuid
*)ufid
));
638 free(patterns
.items
);
639 free_flow_actions(&actions
);
644 * Check if any unsupported flow patterns are specified.
647 netdev_offload_dpdk_validate_flow(const struct match
*match
)
649 struct match match_zero_wc
;
650 const struct flow
*masks
= &match
->wc
.masks
;
652 /* Create a wc-zeroed version of flow. */
653 match_init(&match_zero_wc
, &match
->flow
, &match
->wc
);
655 if (!is_all_zeros(&match_zero_wc
.flow
.tunnel
,
656 sizeof match_zero_wc
.flow
.tunnel
)) {
660 if (masks
->metadata
|| masks
->skb_priority
||
661 masks
->pkt_mark
|| masks
->dp_hash
) {
665 /* recirc id must be zero. */
666 if (match_zero_wc
.flow
.recirc_id
) {
670 if (masks
->ct_state
|| masks
->ct_nw_proto
||
671 masks
->ct_zone
|| masks
->ct_mark
||
672 !ovs_u128_is_zero(masks
->ct_label
)) {
676 if (masks
->conj_id
|| masks
->actset_output
) {
680 /* Unsupported L2. */
681 if (!is_all_zeros(masks
->mpls_lse
, sizeof masks
->mpls_lse
)) {
685 /* Unsupported L3. */
686 if (masks
->ipv6_label
|| masks
->ct_nw_src
|| masks
->ct_nw_dst
||
687 !is_all_zeros(&masks
->ipv6_src
, sizeof masks
->ipv6_src
) ||
688 !is_all_zeros(&masks
->ipv6_dst
, sizeof masks
->ipv6_dst
) ||
689 !is_all_zeros(&masks
->ct_ipv6_src
, sizeof masks
->ct_ipv6_src
) ||
690 !is_all_zeros(&masks
->ct_ipv6_dst
, sizeof masks
->ct_ipv6_dst
) ||
691 !is_all_zeros(&masks
->nd_target
, sizeof masks
->nd_target
) ||
692 !is_all_zeros(&masks
->nsh
, sizeof masks
->nsh
) ||
693 !is_all_zeros(&masks
->arp_sha
, sizeof masks
->arp_sha
) ||
694 !is_all_zeros(&masks
->arp_tha
, sizeof masks
->arp_tha
)) {
698 /* If fragmented, then don't HW accelerate - for now. */
699 if (match_zero_wc
.flow
.nw_frag
) {
703 /* Unsupported L4. */
704 if (masks
->igmp_group_ip4
|| masks
->ct_tp_src
|| masks
->ct_tp_dst
) {
711 VLOG_ERR("cannot HW accelerate this flow due to unsupported protocols");
716 netdev_offload_dpdk_destroy_flow(struct netdev
*netdev
,
717 const ovs_u128
*ufid
,
718 struct rte_flow
*rte_flow
)
720 struct rte_flow_error error
;
721 int ret
= netdev_dpdk_rte_flow_destroy(netdev
, rte_flow
, &error
);
724 ufid_to_rte_flow_disassociate(ufid
);
725 VLOG_DBG("%s: removed rte flow %p associated with ufid " UUID_FMT
"\n",
726 netdev_get_name(netdev
), rte_flow
,
727 UUID_ARGS((struct uuid
*)ufid
));
729 VLOG_ERR("%s: rte flow destroy error: %u : message : %s\n",
730 netdev_get_name(netdev
), error
.type
, error
.message
);
737 netdev_offload_dpdk_flow_put(struct netdev
*netdev
, struct match
*match
,
738 struct nlattr
*actions
, size_t actions_len
,
739 const ovs_u128
*ufid
, struct offload_info
*info
,
740 struct dpif_flow_stats
*stats
)
742 struct rte_flow
*rte_flow
;
746 * If an old rte_flow exists, it means it's a flow modification.
747 * Here destroy the old rte flow first before adding a new one.
749 rte_flow
= ufid_to_rte_flow_find(ufid
);
751 ret
= netdev_offload_dpdk_destroy_flow(netdev
, ufid
, rte_flow
);
757 ret
= netdev_offload_dpdk_validate_flow(match
);
763 memset(stats
, 0, sizeof *stats
);
765 return netdev_offload_dpdk_add_flow(netdev
, match
, actions
,
766 actions_len
, ufid
, info
);
770 netdev_offload_dpdk_flow_del(struct netdev
*netdev
, const ovs_u128
*ufid
,
771 struct dpif_flow_stats
*stats
)
773 struct rte_flow
*rte_flow
= ufid_to_rte_flow_find(ufid
);
780 memset(stats
, 0, sizeof *stats
);
782 return netdev_offload_dpdk_destroy_flow(netdev
, ufid
, rte_flow
);
/* netdev_flow_api 'init_flow_api' callback: report whether 'netdev'
 * supports rte_flow offload. */
static int
netdev_offload_dpdk_init_flow_api(struct netdev *netdev)
{
    return netdev_dpdk_flow_api_supported(netdev) ? 0 : EOPNOTSUPP;
}
791 const struct netdev_flow_api netdev_offload_dpdk
= {
792 .type
= "dpdk_flow_api",
793 .flow_put
= netdev_offload_dpdk_flow_put
,
794 .flow_del
= netdev_offload_dpdk_flow_del
,
795 .init_flow_api
= netdev_offload_dpdk_init_flow_api
,