/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"
struct fdir_flow_desc {
	uint16_t dst_port;
	uint16_t src_port;
	uint32_t src_ip[4];
	uint32_t dst_ip[4];
	uint8_t mac[6];
	uint16_t vlan_tag;
	enum hash_rxq_type type;
};
struct mlx5_fdir_filter {
	LIST_ENTRY(mlx5_fdir_filter) next;
	uint16_t queue; /* Queue assigned to if FDIR match. */
	enum rte_eth_fdir_behavior behavior;
	struct fdir_flow_desc desc;
	struct ibv_exp_flow *flow;
};

LIST_HEAD(fdir_filter_list, mlx5_fdir_filter);
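
/*
 * Filters are kept in the fdir_filter_list owned by struct priv. A DPDK
 * rte_eth_fdir_filter is first converted into a struct fdir_flow_desc
 * (see fdir_filter_to_flow_desc() below); duplicates are then detected by
 * comparing descriptors bytewise in priv_find_filter_in_list().
 */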
/**
 * Convert struct rte_eth_fdir_filter to mlx5 filter descriptor.
 *
 * @param[in] fdir_filter
 *   DPDK filter structure to convert.
 * @param[out] desc
 *   Resulting mlx5 filter descriptor.
 * @param mode
 *   Flow director mode.
 */
static void
fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
			 struct fdir_flow_desc *desc, enum rte_fdir_mode mode)
{
	/* Initialize descriptor. */
	memset(desc, 0, sizeof(*desc));

	/* Set VLAN ID. */
	desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
	/* Set MAC address. */
	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		rte_memcpy(desc->mac,
			   fdir_filter->input.flow.mac_vlan_flow.mac_addr.
				addr_bytes,
			   sizeof(desc->mac));
		desc->type = HASH_RXQ_ETH;
		return;
	}
	/* Set mode */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		desc->type = HASH_RXQ_UDPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		desc->type = HASH_RXQ_TCPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		desc->type = HASH_RXQ_IPV4;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		desc->type = HASH_RXQ_UDPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		desc->type = HASH_RXQ_TCPV6;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		desc->type = HASH_RXQ_IPV6;
		break;
	default:
		break;
	}
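
	/*
	 * Note: only non-fragmented IPv4/IPv6 UDP, TCP and "other" flow
	 * types are handled here; any other flow type falls through the
	 * default cases and leaves the descriptor unchanged.
	 */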
	/* Set flow values */
	switch (fdir_filter->input.flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
		desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
		/* Fall through to set the IPv4 addresses as well. */
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
		desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
		desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		desc->src_port = fdir_filter->input.flow.udp6_flow.src_port;
		desc->dst_port = fdir_filter->input.flow.udp6_flow.dst_port;
		/* Fall through to set the IPv6 addresses as well. */
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
		rte_memcpy(desc->src_ip,
			   fdir_filter->input.flow.ipv6_flow.src_ip,
			   sizeof(desc->src_ip));
		rte_memcpy(desc->dst_ip,
			   fdir_filter->input.flow.ipv6_flow.dst_ip,
			   sizeof(desc->dst_ip));
		break;
	default:
		break;
	}
}
/**
 * Check if two flow descriptors overlap according to configured mask.
 *
 * @param priv
 *   Private structure that provides flow director mask.
 * @param desc1
 *   First flow descriptor to compare.
 * @param desc2
 *   Second flow descriptor to compare.
 *
 * @return
 *   Nonzero if descriptors overlap.
 */
static int
priv_fdir_overlap(const struct priv *priv,
		  const struct fdir_flow_desc *desc1,
		  const struct fdir_flow_desc *desc2)
{
	const struct rte_eth_fdir_masks *mask =
		&priv->dev->data->dev_conf.fdir_conf.mask;
	unsigned int i;

	if (desc1->type != desc2->type)
		return 0;
	/* Ignore non masked bits. */
	for (i = 0; i != RTE_DIM(desc1->mac); ++i)
		if ((desc1->mac[i] & mask->mac_addr_byte_mask) !=
		    (desc2->mac[i] & mask->mac_addr_byte_mask))
			return 0;
	if (((desc1->src_port & mask->src_port_mask) !=
	     (desc2->src_port & mask->src_port_mask)) ||
	    ((desc1->dst_port & mask->dst_port_mask) !=
	     (desc2->dst_port & mask->dst_port_mask)))
		return 0;
	switch (desc1->type) {
	case HASH_RXQ_IPV4:
	case HASH_RXQ_UDPV4:
	case HASH_RXQ_TCPV4:
		if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
		     (desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
		    ((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
		     (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
			return 0;
		break;
	case HASH_RXQ_IPV6:
	case HASH_RXQ_UDPV6:
	case HASH_RXQ_TCPV6:
		for (i = 0; i != RTE_DIM(desc1->src_ip); ++i)
			if (((desc1->src_ip[i] & mask->ipv6_mask.src_ip[i]) !=
			     (desc2->src_ip[i] & mask->ipv6_mask.src_ip[i])) ||
			    ((desc1->dst_ip[i] & mask->ipv6_mask.dst_ip[i]) !=
			     (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i])))
				return 0;
		break;
	default:
		break;
	}
	return 1;
}
/**
 * Create flow director steering rule for a specific filter.
 *
 * @param priv
 *   Private structure.
 * @param mlx5_fdir_filter
 *   Filter to create a steering rule for.
 * @param fdir_queue
 *   Flow director queue for matching packets.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_flow_add(struct priv *priv,
		   struct mlx5_fdir_filter *mlx5_fdir_filter,
		   struct fdir_queue *fdir_queue)
{
	struct ibv_exp_flow *flow;
	struct fdir_flow_desc *desc = &mlx5_fdir_filter->desc;
	enum rte_fdir_mode fdir_mode =
		priv->dev->data->dev_conf.fdir_conf.mode;
	struct rte_eth_fdir_masks *mask =
		&priv->dev->data->dev_conf.fdir_conf.mask;
	FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, desc->type));
	struct ibv_exp_flow_attr *attr = &data->attr;
	uintptr_t spec_offset = (uintptr_t)&data->spec;
	struct ibv_exp_flow_spec_eth *spec_eth;
	struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
	struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
	struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
	struct mlx5_fdir_filter *iter_fdir_filter;
	unsigned int i;
	/* Abort if an existing flow overlaps this one to avoid packet
	 * duplication, even if it targets another queue. */
	LIST_FOREACH(iter_fdir_filter, priv->fdir_filter_list, next)
		if ((iter_fdir_filter != mlx5_fdir_filter) &&
		    (iter_fdir_filter->flow != NULL) &&
		    (priv_fdir_overlap(priv,
				       &mlx5_fdir_filter->desc,
				       &iter_fdir_filter->desc)))
			return EEXIST;
	/*
	 * No padding must be inserted by the compiler between attr and spec.
	 * This layout is expected by libibverbs.
	 */
	assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec_offset);

	priv_flow_attr(priv, attr, sizeof(data), desc->type);
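
	/*
	 * At this point priv_flow_attr() is expected to have filled the
	 * attribute header and the per-type specification headers
	 * (type/size fields); the code below only fills in the values and
	 * masks taken from the filter descriptor. The asserts that follow
	 * check this assumption.
	 */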
	/* Set Ethernet spec */
	spec_eth = (struct ibv_exp_flow_spec_eth *)spec_offset;

	/* The first specification must be Ethernet. */
	assert(spec_eth->type == IBV_EXP_FLOW_SPEC_ETH);
	assert(spec_eth->size == sizeof(*spec_eth));

	/* VLAN ID */
	spec_eth->val.vlan_tag = desc->vlan_tag & mask->vlan_tci_mask;
	spec_eth->mask.vlan_tag = mask->vlan_tci_mask;
	/* Update priority */
	attr->priority = 2;

	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		/* MAC Address */
		for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
			spec_eth->val.dst_mac[i] =
				desc->mac[i] & mask->mac_addr_byte_mask;
			spec_eth->mask.dst_mac[i] = mask->mac_addr_byte_mask;
		}
		goto create_flow;
	}
	switch (desc->type) {
	case HASH_RXQ_IPV4:
	case HASH_RXQ_UDPV4:
	case HASH_RXQ_TCPV4:
		spec_offset += spec_eth->size;

		/* Set IP spec */
		spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;

		/* The second specification must be IP. */
		assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
		assert(spec_ipv4->size == sizeof(*spec_ipv4));

		spec_ipv4->val.src_ip =
			desc->src_ip[0] & mask->ipv4_mask.src_ip;
		spec_ipv4->val.dst_ip =
			desc->dst_ip[0] & mask->ipv4_mask.dst_ip;
		spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
		spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;

		/* Update priority */
		attr->priority = 1;

		if (desc->type == HASH_RXQ_IPV4)
			goto create_flow;

		spec_offset += spec_ipv4->size;
		break;
	case HASH_RXQ_IPV6:
	case HASH_RXQ_UDPV6:
	case HASH_RXQ_TCPV6:
		spec_offset += spec_eth->size;

		/* Set IP spec */
		spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;

		/* The second specification must be IP. */
		assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
		assert(spec_ipv6->size == sizeof(*spec_ipv6));

		for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
			((uint32_t *)spec_ipv6->val.src_ip)[i] =
				desc->src_ip[i] & mask->ipv6_mask.src_ip[i];
			((uint32_t *)spec_ipv6->val.dst_ip)[i] =
				desc->dst_ip[i] & mask->ipv6_mask.dst_ip[i];
		}
		rte_memcpy(spec_ipv6->mask.src_ip,
			   mask->ipv6_mask.src_ip,
			   sizeof(spec_ipv6->mask.src_ip));
		rte_memcpy(spec_ipv6->mask.dst_ip,
			   mask->ipv6_mask.dst_ip,
			   sizeof(spec_ipv6->mask.dst_ip));

		/* Update priority */
		attr->priority = 1;

		if (desc->type == HASH_RXQ_IPV6)
			goto create_flow;

		spec_offset += spec_ipv6->size;
		break;
	default:
		ERROR("invalid flow attribute type");
		return EINVAL;
	}
	/* Set TCP/UDP flow specification. */
	spec_tcp_udp = (struct ibv_exp_flow_spec_tcp_udp *)spec_offset;

	/* The third specification must be TCP/UDP. */
	assert(spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_TCP ||
	       spec_tcp_udp->type == IBV_EXP_FLOW_SPEC_UDP);
	assert(spec_tcp_udp->size == sizeof(*spec_tcp_udp));

	spec_tcp_udp->val.src_port = desc->src_port & mask->src_port_mask;
	spec_tcp_udp->val.dst_port = desc->dst_port & mask->dst_port_mask;
	spec_tcp_udp->mask.src_port = mask->src_port_mask;
	spec_tcp_udp->mask.dst_port = mask->dst_port_mask;

	/* Update priority */
	attr->priority = 0;

create_flow:
	errno = 0;
	flow = ibv_exp_create_flow(fdir_queue->qp, attr);
	if (flow == NULL) {
		/* It's not clear whether errno is always set in this case. */
		ERROR("%p: flow director configuration failed, errno=%d: %s",
		      (void *)priv, errno,
		      (errno ? strerror(errno) : "Unknown error"));
		if (errno)
			return errno;
		return EINVAL;
	}

	DEBUG("%p: added flow director rule (%p)", (void *)priv, (void *)flow);
	mlx5_fdir_filter->flow = flow;
	return 0;
}
/**
 * Destroy a flow director queue.
 *
 * @param priv
 *   Private structure.
 * @param fdir_queue
 *   Flow director queue to be destroyed.
 */
void
priv_fdir_queue_destroy(struct priv *priv, struct fdir_queue *fdir_queue)
{
	struct mlx5_fdir_filter *fdir_filter;

	/* Disable filter flows still applying to this queue. */
	LIST_FOREACH(fdir_filter, priv->fdir_filter_list, next) {
		unsigned int idx = fdir_filter->queue;
		struct rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);

		assert(idx < priv->rxqs_n);
		if (fdir_queue == rxq_ctrl->fdir_queue &&
		    fdir_filter->flow != NULL) {
			claim_zero(ibv_exp_destroy_flow(fdir_filter->flow));
			fdir_filter->flow = NULL;
		}
	}
	assert(fdir_queue->qp);
	claim_zero(ibv_destroy_qp(fdir_queue->qp));
	assert(fdir_queue->ind_table);
	claim_zero(ibv_exp_destroy_rwq_ind_table(fdir_queue->ind_table));
	if (fdir_queue->wq)
		claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
	if (fdir_queue->cq)
		claim_zero(ibv_destroy_cq(fdir_queue->cq));
#ifndef NDEBUG
	memset(fdir_queue, 0x2a, sizeof(*fdir_queue));
#endif
	rte_free(fdir_queue);
}
/**
 * Create a flow director queue.
 *
 * @param priv
 *   Private structure.
 * @param wq
 *   Work queue to route matched packets to, NULL if one needs to
 *   be created.
 * @param socket
 *   NUMA socket for memory allocations.
 *
 * @return
 *   Related flow director queue on success, NULL otherwise.
 */
static struct fdir_queue *
priv_fdir_queue_create(struct priv *priv, struct ibv_exp_wq *wq,
		       unsigned int socket)
{
	struct fdir_queue *fdir_queue;

	fdir_queue = rte_calloc_socket(__func__, 1, sizeof(*fdir_queue),
				       0, socket);
	if (!fdir_queue) {
		ERROR("cannot allocate flow director queue");
		return NULL;
	}
	assert(priv->pd);
	assert(priv->ctx);
	if (!wq) {
		fdir_queue->cq = ibv_exp_create_cq(
			priv->ctx, 1, NULL, NULL, 0,
			&(struct ibv_exp_cq_init_attr){
				.comp_mask = 0,
			});
		if (!fdir_queue->cq) {
			ERROR("cannot create flow director CQ");
			goto error;
		}
		fdir_queue->wq = ibv_exp_create_wq(
			priv->ctx,
			&(struct ibv_exp_wq_init_attr){
				.wq_type = IBV_EXP_WQT_RQ,
				.max_recv_wr = 1,
				.max_recv_sge = 1,
				.pd = priv->pd,
				.cq = fdir_queue->cq,
			});
		if (!fdir_queue->wq) {
			ERROR("cannot create flow director WQ");
			goto error;
		}
		wq = fdir_queue->wq;
	}
	fdir_queue->ind_table = ibv_exp_create_rwq_ind_table(
		priv->ctx,
		&(struct ibv_exp_rwq_ind_table_init_attr){
			.pd = priv->pd,
			.log_ind_tbl_size = 0,
			.ind_tbl = &wq,
			.comp_mask = 0,
		});
	if (!fdir_queue->ind_table) {
		ERROR("cannot create flow director indirection table");
		goto error;
	}
	fdir_queue->qp = ibv_exp_create_qp(
		priv->ctx,
		&(struct ibv_exp_qp_init_attr){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_EXP_QP_INIT_ATTR_PD |
				IBV_EXP_QP_INIT_ATTR_PORT |
				IBV_EXP_QP_INIT_ATTR_RX_HASH,
			.pd = priv->pd,
			.rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
				.rx_hash_function =
					IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = rss_hash_default_key_len,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
				.rwq_ind_tbl = fdir_queue->ind_table,
			},
			.port_num = priv->port,
		});
	if (!fdir_queue->qp) {
		ERROR("cannot create flow director hash RX QP");
		goto error;
	}
	return fdir_queue;
error:
	assert(!fdir_queue->qp);
	if (fdir_queue->ind_table)
		claim_zero(ibv_exp_destroy_rwq_ind_table
			   (fdir_queue->ind_table));
	if (fdir_queue->wq)
		claim_zero(ibv_exp_destroy_wq(fdir_queue->wq));
	if (fdir_queue->cq)
		claim_zero(ibv_destroy_cq(fdir_queue->cq));
	rte_free(fdir_queue);
	return NULL;
}
/**
 * Get flow director queue for a specific RX queue, create it in case
 * it does not exist.
 *
 * @param priv
 *   Private structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   Related flow director queue on success, NULL otherwise.
 */
static struct fdir_queue *
priv_get_fdir_queue(struct priv *priv, uint16_t idx)
{
	struct rxq_ctrl *rxq_ctrl =
		container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq);
	struct fdir_queue *fdir_queue = rxq_ctrl->fdir_queue;

	assert(rxq_ctrl->wq);
	if (fdir_queue == NULL) {
		fdir_queue = priv_fdir_queue_create(priv, rxq_ctrl->wq,
						    rxq_ctrl->socket);
		rxq_ctrl->fdir_queue = fdir_queue;
	}
	return fdir_queue;
}
/**
 * Get flow director drop queue. Create it if it does not exist.
 *
 * @param priv
 *   Private structure.
 *
 * @return
 *   Flow director drop queue on success, NULL otherwise.
 */
static struct fdir_queue *
priv_get_fdir_drop_queue(struct priv *priv)
{
580 priv_get_fdir_drop_queue(struct priv
*priv
)
582 struct fdir_queue
*fdir_queue
= priv
->fdir_drop_queue
;
584 if (fdir_queue
== NULL
) {
585 unsigned int socket
= SOCKET_ID_ANY
;
587 /* Select a known NUMA socket if possible. */
588 if (priv
->rxqs_n
&& (*priv
->rxqs
)[0])
589 socket
= container_of((*priv
->rxqs
)[0],
590 struct rxq_ctrl
, rxq
)->socket
;
591 fdir_queue
= priv_fdir_queue_create(priv
, NULL
, socket
);
592 priv
->fdir_drop_queue
= fdir_queue
;
/**
 * Enable flow director filter and create steering rules.
 *
 * @param priv
 *   Private structure.
 * @param mlx5_fdir_filter
 *   Filter to create steering rule for.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_enable(struct priv *priv,
			struct mlx5_fdir_filter *mlx5_fdir_filter)
{
	struct fdir_queue *fdir_queue;

	/* Check if flow already exists. */
	if (mlx5_fdir_filter->flow != NULL)
		return 0;

	/* Get fdir_queue for specific queue. */
	if (mlx5_fdir_filter->behavior == RTE_ETH_FDIR_REJECT)
		fdir_queue = priv_get_fdir_drop_queue(priv);
	else
		fdir_queue = priv_get_fdir_queue(priv,
						 mlx5_fdir_filter->queue);

	if (fdir_queue == NULL) {
		ERROR("failed to create flow director rxq for queue %d",
		      mlx5_fdir_filter->queue);
		return EINVAL;
	}

	/* Create flow */
	return priv_fdir_flow_add(priv, mlx5_fdir_filter, fdir_queue);
}
/**
 * Initialize flow director filters list.
 *
 * @param priv
 *   Private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
fdir_init_filters_list(struct priv *priv)
{
	/* Filter list initialization should be done only once. */
	if (priv->fdir_filter_list)
		return 0;

	/* Create filters list. */
	priv->fdir_filter_list =
		rte_calloc(__func__, 1, sizeof(*priv->fdir_filter_list), 0);

	if (priv->fdir_filter_list == NULL) {
		int err = ENOMEM;

		ERROR("cannot allocate flow director filter list: %s",
		      strerror(err));
		return err;
	}

	LIST_INIT(priv->fdir_filter_list);

	return 0;
}
/**
 * Flush all flow director filters.
 *
 * @param priv
 *   Private structure.
 */
static void
priv_fdir_filter_flush(struct priv *priv)
{
	struct mlx5_fdir_filter *mlx5_fdir_filter;

	while ((mlx5_fdir_filter = LIST_FIRST(priv->fdir_filter_list))) {
		struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;

		DEBUG("%p: flushing flow director filter %p",
		      (void *)priv, (void *)mlx5_fdir_filter);
		LIST_REMOVE(mlx5_fdir_filter, next);
		if (flow != NULL)
			claim_zero(ibv_exp_destroy_flow(flow));
		rte_free(mlx5_fdir_filter);
	}
}
/**
 * Remove all flow director filters and delete list.
 *
 * @param priv
 *   Private structure.
 */
void
priv_fdir_delete_filters_list(struct priv *priv)
{
	priv_fdir_filter_flush(priv);
	rte_free(priv->fdir_filter_list);
	priv->fdir_filter_list = NULL;
}
/**
 * Disable flow director, remove all steering rules.
 *
 * @param priv
 *   Private structure.
 */
void
priv_fdir_disable(struct priv *priv)
{
	unsigned int i;
	struct mlx5_fdir_filter *mlx5_fdir_filter;

	/* Run on every flow director filter and destroy flow handle. */
	LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
		struct ibv_exp_flow *flow;

		/* Only valid elements should be in the list */
		assert(mlx5_fdir_filter != NULL);
		flow = mlx5_fdir_filter->flow;

		/* Destroy flow handle */
		if (flow != NULL) {
			claim_zero(ibv_exp_destroy_flow(flow));
			mlx5_fdir_filter->flow = NULL;
		}
	}

	/* Destroy flow director context in each RX queue. */
	for (i = 0; (i != priv->rxqs_n); i++) {
		struct rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);

		if (!rxq_ctrl->fdir_queue)
			continue;
		priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
		rxq_ctrl->fdir_queue = NULL;
	}
	if (priv->fdir_drop_queue) {
		priv_fdir_queue_destroy(priv, priv->fdir_drop_queue);
		priv->fdir_drop_queue = NULL;
	}
}
/**
 * Enable flow director, create steering rules.
 *
 * @param priv
 *   Private structure.
 */
void
priv_fdir_enable(struct priv *priv)
{
	struct mlx5_fdir_filter *mlx5_fdir_filter;

	/* Run on every fdir filter and create flow handle */
	LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
		/* Only valid elements should be in the list */
		assert(mlx5_fdir_filter != NULL);

		priv_fdir_filter_enable(priv, mlx5_fdir_filter);
	}
}
/**
 * Find specific filter in list.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Flow director filter to find.
 *
 * @return
 *   Filter element if found, otherwise NULL.
 */
static struct mlx5_fdir_filter *
priv_find_filter_in_list(struct priv *priv,
			 const struct rte_eth_fdir_filter *fdir_filter)
{
	struct fdir_flow_desc desc;
	struct mlx5_fdir_filter *mlx5_fdir_filter;
	enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;

	/* Get flow director filter to look for. */
	fdir_filter_to_flow_desc(fdir_filter, &desc, fdir_mode);

	/* Look for the requested element. */
	LIST_FOREACH(mlx5_fdir_filter, priv->fdir_filter_list, next) {
		/* Only valid elements should be in the list. */
		assert(mlx5_fdir_filter != NULL);

		/* Return matching filter. */
		if (!memcmp(&desc, &mlx5_fdir_filter->desc, sizeof(desc)))
			return mlx5_fdir_filter;
	}

	/* Filter not found */
	return NULL;
}
/**
 * Add new flow director filter and store it in list.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Flow director filter to add.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_add(struct priv *priv,
		     const struct rte_eth_fdir_filter *fdir_filter)
{
	struct mlx5_fdir_filter *mlx5_fdir_filter;
	enum rte_fdir_mode fdir_mode = priv->dev->data->dev_conf.fdir_conf.mode;
	int err = 0;

	/* Validate queue number. */
	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
		ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
		return EINVAL;
	}

	/* Duplicate filters are currently unsupported. */
	mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
	if (mlx5_fdir_filter != NULL) {
		ERROR("filter already exists");
		return EINVAL;
	}

	/* Create new flow director filter. */
	mlx5_fdir_filter =
		rte_calloc(__func__, 1, sizeof(*mlx5_fdir_filter), 0);
	if (mlx5_fdir_filter == NULL) {
		err = ENOMEM;
		ERROR("cannot allocate flow director filter: %s",
		      strerror(err));
		return err;
	}

	/* Set action parameters. */
	mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;
	mlx5_fdir_filter->behavior = fdir_filter->action.behavior;

	/* Convert to mlx5 filter descriptor. */
	fdir_filter_to_flow_desc(fdir_filter,
				 &mlx5_fdir_filter->desc, fdir_mode);

	/* Insert new filter into list. */
	LIST_INSERT_HEAD(priv->fdir_filter_list, mlx5_fdir_filter, next);

	DEBUG("%p: flow director filter %p added",
	      (void *)priv, (void *)mlx5_fdir_filter);

	/* Enable filter immediately if device is started. */
	if (priv->started)
		err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);

	return err;
}
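
/*
 * Filters added while the device is stopped are only recorded in the list;
 * their steering rules are created later by priv_fdir_enable() when the
 * device starts.
 */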
/**
 * Update queue for specific filter.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Filter to be updated.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_update(struct priv *priv,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	struct mlx5_fdir_filter *mlx5_fdir_filter;

	/* Validate queue number. */
	if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
		ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
		return EINVAL;
	}

	mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
	if (mlx5_fdir_filter != NULL) {
		struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;
		int err = 0;

		/* Update queue number. */
		mlx5_fdir_filter->queue = fdir_filter->action.rx_queue;

		/* Destroy flow handle. */
		if (flow != NULL) {
			claim_zero(ibv_exp_destroy_flow(flow));
			mlx5_fdir_filter->flow = NULL;
		}
		DEBUG("%p: flow director filter %p updated",
		      (void *)priv, (void *)mlx5_fdir_filter);

		/* Enable filter if device is started. */
		if (priv->started)
			err = priv_fdir_filter_enable(priv, mlx5_fdir_filter);

		return err;
	}

	/* Filter not found, create it. */
	DEBUG("%p: filter not found for update, creating new filter",
	      (void *)priv);
	return priv_fdir_filter_add(priv, fdir_filter);
}
/**
 * Delete specific filter.
 *
 * @param priv
 *   Private structure.
 * @param fdir_filter
 *   Filter to be deleted.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_filter_delete(struct priv *priv,
			const struct rte_eth_fdir_filter *fdir_filter)
{
	struct mlx5_fdir_filter *mlx5_fdir_filter;

	mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
	if (mlx5_fdir_filter != NULL) {
		struct ibv_exp_flow *flow = mlx5_fdir_filter->flow;

		/* Remove element from list. */
		LIST_REMOVE(mlx5_fdir_filter, next);

		/* Destroy flow handle. */
		if (flow != NULL) {
			claim_zero(ibv_exp_destroy_flow(flow));
			mlx5_fdir_filter->flow = NULL;
		}

		DEBUG("%p: flow director filter %p deleted",
		      (void *)priv, (void *)mlx5_fdir_filter);

		/* Delete filter. */
		rte_free(mlx5_fdir_filter);

		return 0;
	}

	ERROR("%p: flow director delete failed, cannot find filter",
	      (void *)priv);
	return ENOENT;
}
/**
 * Get flow director information.
 *
 * @param priv
 *   Private structure.
 * @param[out] fdir_info
 *   Resulting flow director information.
 */
static void
priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
{
	struct rte_eth_fdir_masks *mask =
		&priv->dev->data->dev_conf.fdir_conf.mask;

	fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
	fdir_info->guarant_spc = 0;

	rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));

	fdir_info->max_flexpayload = 0;
	fdir_info->flow_types_mask[0] = 0;

	fdir_info->flex_payload_unit = 0;
	fdir_info->max_flex_payload_segment_num = 0;
	fdir_info->flex_payload_limit = 0;
	memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
}
/**
 * Deal with flow director operations.
 *
 * @param priv
 *   Pointer to private structure.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
{
	enum rte_fdir_mode fdir_mode =
		priv->dev->data->dev_conf.fdir_conf.mode;
	int ret = 0;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
	    fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		ERROR("%p: flow director mode %d not supported",
		      (void *)priv, fdir_mode);
		return EINVAL;
	}

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = priv_fdir_filter_add(priv, arg);
		break;
	case RTE_ETH_FILTER_UPDATE:
		ret = priv_fdir_filter_update(priv, arg);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = priv_fdir_filter_delete(priv, arg);
		break;
	case RTE_ETH_FILTER_FLUSH:
		priv_fdir_filter_flush(priv);
		break;
	case RTE_ETH_FILTER_INFO:
		priv_fdir_info_get(priv, arg);
		break;
	default:
		DEBUG("%p: unknown operation %u", (void *)priv, filter_op);
		ret = EINVAL;
		break;
	}
	return ret;
}
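
/*
 * priv_fdir_ctrl_func() and the helpers above return positive errno
 * values; mlx5_dev_filter_ctrl() below negates the result to match the
 * rte_eth_dev filter control convention of negative errno on failure.
 */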
/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = EINVAL;
	struct priv *priv = dev->data->dev_private;
) {
1070 case RTE_ETH_FILTER_FDIR
:
1072 ret
= priv_fdir_ctrl_func(priv
, filter_op
, arg
);
1076 ERROR("%p: filter type (%d) not supported",
1077 (void *)dev
, filter_type
);