/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = (pattern) + (index);			\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			(index)++;				\
			item = (pattern) + (index);		\
		}						\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = (actions) + (index);			\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			(index)++;				\
			act = (actions) + (index);		\
		}						\
	} while (0)

#define IGB_FLEX_RAW_NUM	12

/**
 * Please be aware that there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally be in network (big endian) order.
 */

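/*
 * For illustration only (not part of the driver): given the convention
 * above, an application filling a pattern spec converts multi-byte fields
 * to network order, while attribute fields stay in CPU order. A minimal
 * sketch with hypothetical values:
 *
 *	struct rte_flow_item_udp udp_spec;
 *	memset(&udp_spec, 0, sizeof(udp_spec));
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);	// big endian item field
 *
 *	struct rte_flow_attr attr;
 *	memset(&attr, 0, sizeof(attr));
 *	attr.ingress = 1;
 *	attr.priority = 1;				// CPU-order attribute
 */
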
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */

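/*
 * For illustration only (not part of the driver): a minimal application-side
 * sketch of a rule matching the layout above. `port_id` and `err` are
 * hypothetical and error handling is omitted.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { 0 }, ip_mask = { 0 };
 *	struct rte_flow_item_udp udp_spec = { 0 }, udp_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *
 *	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); // 192.168.1.20
 *	ip_mask.hdr.src_addr = UINT32_MAX;
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *	udp_mask.hdr.dst_port = UINT16_MAX;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */
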
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		if (item->spec && item->mask) {
			tcp_mask = item->mask;

			/**
			 * Only support src & dst ports, tcp flags,
			 * others should be masked.
			 */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = tcp_mask->hdr.dst_port;
			filter->src_port_mask = tcp_mask->hdr.src_port;
			if (tcp_mask->hdr.tcp_flags == 0xFF) {
				filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else if (!tcp_mask->hdr.tcp_flags) {
				filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			tcp_spec = item->spec;
			filter->dst_port = tcp_spec->hdr.dst_port;
			filter->src_port = tcp_spec->hdr.src_port;
			filter->tcp_flags = tcp_spec->hdr.tcp_flags;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		if (item->spec && item->mask) {
			udp_mask = item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = udp_mask->hdr.dst_port;
			filter->src_port_mask = udp_mask->hdr.src_port;

			udp_spec = item->spec;
			filter->dst_port = udp_spec->hdr.dst_port;
			filter->src_port = udp_spec->hdr.src_port;
		}
	} else {
		if (item->spec && item->mask) {
			sctp_mask = item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = sctp_mask->hdr.dst_port;
			filter->src_port_mask = sctp_mask->hdr.src_port;

			sctp_spec = (const struct rte_flow_item_sctp *)
					item->spec;
			filter->dst_port = sctp_spec->hdr.dst_port;
			filter->src_port = sctp_spec->hdr.src_port;
		}
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;

	return 0;
}

/* a specific function for igb because the flags are specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Igb doesn't support many priorities. */
	if (filter->priority > E1000_2TUPLE_MAX_PRI) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_5TUPLE_FLAGS;
	} else {
		if (filter->src_ip_mask || filter->dst_ip_mask ||
			filter->src_port_mask) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "only two tuple are "
				"supported by this filter");
			return -rte_errno;
		}
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_2TUPLE_FLAGS;
	}

	return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */

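/*
 * For illustration only (not part of the driver): a minimal sketch of an
 * ethertype rule matching the example above (type 0x0807). `port_id` and
 * `err` are hypothetical.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = { 0 }, eth_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *
 *	eth_spec.type = rte_cpu_to_be_16(0x0807);
 *	eth_mask.type = UINT16_MAX;	// src/dst MAC masks stay all-zero
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */
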
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
			   const struct rte_flow_attr *attr,
			   const struct rte_flow_item pattern[],
			   const struct rte_flow_action actions[],
			   struct rte_eth_ethertype_filter *filter,
			   struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(
					struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
					"by ethertype filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(
					struct rte_eth_ethertype_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not supported "
					"by ethertype filter");
			return -rte_errno;
		}
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */

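/*
 * For illustration only (not part of the driver): a minimal sketch of a TCP
 * SYN rule matching the layout above. `port_id` and `err` are hypothetical.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = { 0 }, tcp_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *
 *	tcp_spec.hdr.tcp_flags = TCP_SYN_FLAG;	// 0x02
 *	tcp_mask.hdr.tcp_flags = 0xFF;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */
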
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by syn filter");
			return -rte_errno;
		}
	} else {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_syn_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
					"supported by syn filter");
			return -rte_errno;
		}
	}

	if (ret)
		return ret;

	return 0;
}

/**
 * Parse the rule to see if it is a flex byte rule.
 * And get the flex byte filter info BTW.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec				Mask
 * RAW		relative	0		0x1
 *		offset		0		0xFFFFFFFF
 *		pattern	{0x08, 0x06}		{0xFF, 0xFF}
 * RAW		relative	1		0x1
 *		offset		100		0xFFFFFFFF
 *		pattern	{0x11, 0x22, 0x33}	{0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */

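/*
 * For illustration only (not part of the driver): a minimal sketch of a flex
 * byte rule matching the first example row above (two bytes {0x08, 0x06} at
 * offset 0). `port_id` and `err` are hypothetical.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	uint8_t spec_pattern[2] = { 0x08, 0x06 };
 *	uint8_t mask_pattern[2] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .offset = 0,
 *		.length = 2, .pattern = spec_pattern,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .offset = UINT32_MAX,
 *		.length = UINT16_MAX, .pattern = mask_pattern,
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
 *		  .spec = &raw_spec, .mask = &raw_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */
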
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_flex_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_raw *raw_spec;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index, i, offset, total_offset;
	uint32_t max_offset = 0;
	int32_t shift, j, raw_index = 0;
	int32_t relative[IGB_FLEX_RAW_NUM] = {0};
	int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

item_loop:

	/* the first not void item should be RAW */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	raw_spec = item->spec;
	raw_mask = item->mask;

	if (!raw_mask->length ||
	    !raw_mask->relative) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_mask->offset)
		offset = raw_spec->offset;
	else
		offset = 0;

	for (j = 0; j < raw_spec->length; j++) {
		if (raw_mask->pattern[j] != 0xFF) {
			memset(filter, 0, sizeof(struct rte_eth_flex_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by flex filter");
			return -rte_errno;
		}
	}

	total_offset = 0;

	if (raw_spec->relative) {
		for (j = raw_index; j > 0; j--) {
			total_offset += raw_offset[j - 1];
			if (!relative[j - 1])
				break;
		}
		if (total_offset + raw_spec->length + offset > max_offset)
			max_offset = total_offset + raw_spec->length + offset;
	} else {
		if (raw_spec->length + offset > max_offset)
			max_offset = raw_spec->length + offset;
	}

	if ((raw_spec->length + offset + total_offset) >
			RTE_FLEX_FILTER_MAXLEN) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	if (raw_spec->relative == 0) {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[offset + j] =
				raw_spec->pattern[j];
		j = offset / CHAR_BIT;
		shift = offset % CHAR_BIT;
	} else {
		for (j = 0; j < raw_spec->length; j++)
			filter->bytes[total_offset + offset + j] =
				raw_spec->pattern[j];
		j = (total_offset + offset) / CHAR_BIT;
		shift = (total_offset + offset) % CHAR_BIT;
	}

	i = 0;

	for ( ; shift < CHAR_BIT; shift++) {
		filter->mask[j] |= (0x80 >> shift);
		i++;
		if (i == raw_spec->length)
			break;
		if (shift == (CHAR_BIT - 1)) {
			j++;
			shift = -1;
		}
	}

	relative[raw_index] = raw_spec->relative;
	raw_offset[raw_index] = offset + raw_spec->length;
	raw_index++;

	/* check if the next not void item is RAW */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by flex filter");
		return -rte_errno;
	}

	/* go back to parser */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* if the item is RAW, the content should be parsed */
		goto item_loop;
	}

	filter->len = RTE_ALIGN(max_offset, 8);

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	filter->priority = (uint16_t)attr->priority;

	return 0;
}

static int
igb_parse_flex_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_flex_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_flex_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue number not supported by flex filter");
		return -rte_errno;
	}

	if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
	    filter->len % sizeof(uint64_t) != 0) {
		PMD_DRV_LOG(ERR, "filter's length is out of range");
		return -EINVAL;
	}

	if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
		PMD_DRV_LOG(ERR, "filter's priority is out of range");
		return -EINVAL;
	}

	return ret;
}

static int
igb_parse_rss_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct igb_rte_flow_rss_conf *rss_conf,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	uint16_t n, index;

	/**
	 * rss only supports forwarding,
	 * check if the first not void action is RSS.
	 */
	index = 0;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rss = (const struct rte_flow_action_rss *)act->conf;

	if (!rss || !rss->queue_num) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act,
			"no valid queues");
		return -rte_errno;
	}

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"queue id > max number of queues");
			return -rte_errno;
		}
	}

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "non-default RSS hash functions are not supported");
	if (rss->level)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS hash key must be exactly 40 bytes");
	if (rss->queue_num > RTE_DIM(rss_conf->queue))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");
	if (igb_rss_conf_init(rss_conf, rss))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS context initialization failure");

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	return 0;
}

/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */

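/*
 * For illustration only (not part of the driver): because the parsers below
 * are tried in a fixed order (ntuple, ethertype, SYN, flex, RSS), an
 * application can pre-check a rule before committing it. `port_id` is
 * hypothetical.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */
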
static struct rte_flow *
igb_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_flex_filter flex_filter;
	struct igb_rte_flow_rss_conf rss_conf;
	struct rte_flow *flow = NULL;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_rss_conf_ele *rss_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;

	flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
			sizeof(struct igb_flow_mem), 0);
	if (!igb_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	igb_flow_mem_ptr->flow = flow;
	igb_flow_mem_ptr->dev = dev;
	TAILQ_INSERT_TAIL(&igb_flow_list,
				igb_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
				sizeof(struct igb_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"igb_ethertype_filter",
				sizeof(struct igb_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("igb_syn_filter",
				sizeof(struct igb_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&igb_filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
					actions, &flex_filter, error);
	if (!ret) {
		ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
		if (!ret) {
			flex_filter_ptr = rte_zmalloc("igb_flex_filter",
				sizeof(struct igb_flex_filter_ele), 0);
			if (!flex_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&flex_filter_ptr->filter_info,
				&flex_filter,
				sizeof(struct rte_eth_flex_filter));
			TAILQ_INSERT_TAIL(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			flow->rule = flex_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
			return flow;
		}
		goto out;
	}

	memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
	ret = igb_parse_rss_filter(dev, attr,
					actions, &rss_conf, error);
	if (!ret) {
		ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
		if (!ret) {
			rss_filter_ptr = rte_zmalloc("igb_rss_filter",
				sizeof(struct igb_rss_conf_ele), 0);
			if (!rss_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			igb_rss_conf_init(&rss_filter_ptr->filter_info,
					  &rss_conf.conf);
			TAILQ_INSERT_TAIL(&igb_filter_rss_list,
				rss_filter_ptr, entries);
			flow->rule = rss_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_HASH;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&igb_flow_list,
		igb_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(igb_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}

/**
 * Check if the flow rule is supported by igb.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for the rule.
 */
static int
igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_flex_filter flex_filter;
	struct igb_rte_flow_rss_conf rss_conf;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = igb_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = igb_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
	ret = igb_parse_flex_filter(dev, attr, pattern,
				actions, &flex_filter, error);
	if (!ret)
		return 0;

	memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
	ret = igb_parse_rss_filter(dev, attr,
					actions, &rss_conf, error);

	return ret;
}

/* Destroy a flow rule on igb. */
static int
igb_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;
	struct igb_rss_conf_ele *rss_filter_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ntuple_filter(dev,
				&ntuple_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
					pmd_flow->rule;
		ret = igb_add_del_ethertype_filter(dev,
				&ethertype_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_syn_filter_set(dev,
				&syn_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		flex_filter_ptr = (struct igb_flex_filter_ele *)
				pmd_flow->rule;
		ret = eth_igb_add_del_flex_filter(dev,
				&flex_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_flex_list,
				flex_filter_ptr, entries);
			rte_free(flex_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_HASH:
		rss_filter_ptr = (struct igb_rss_conf_ele *)
				pmd_flow->rule;
		ret = igb_config_rss_filter(dev,
					&rss_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&igb_filter_rss_list,
				rss_filter_ptr, entries);
			rte_free(rss_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&igb_flow_list,
				igb_flow_mem_ptr, entries);
			rte_free(igb_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}

/* remove all the n-tuple filters */
static void
igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *p_5tuple;
	struct e1000_2tuple_filter *p_2tuple;

	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
		igb_delete_5tuple_filter_82576(dev, p_5tuple);

	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
		igb_delete_2tuple_filter(dev, p_2tuple);
}

/* remove all the ether type filters */
static void
igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i;

	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (filter_info->ethertype_mask & (1 << i)) {
			(void)igb_ethertype_filter_remove(filter_info,
							    (uint8_t)i);
			E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
			E1000_WRITE_FLUSH(hw);
		}
	}
}

/* remove the SYN filter */
static void
igb_clear_syn_filter(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
		filter_info->syn_info = 0;
		E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
		E1000_WRITE_FLUSH(hw);
	}
}

/* remove all the flex filters */
static void
igb_clear_all_flex_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_flex_filter *flex_filter;

	while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
		igb_remove_flex_filter(dev, flex_filter);
}

/* remove the rss filter */
static void
igb_clear_rss_filter(struct rte_eth_dev *dev)
{
	struct e1000_filter_info *filter =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter->rss_info.conf.queue_num)
		igb_config_rss_filter(dev, &filter->rss_info, FALSE);
}

static void
igb_filterlist_flush(struct rte_eth_dev *dev)
{
	struct igb_ntuple_filter_ele *ntuple_filter_ptr;
	struct igb_ethertype_filter_ele *ethertype_filter_ptr;
	struct igb_eth_syn_filter_ele *syn_filter_ptr;
	struct igb_flex_filter_ele *flex_filter_ptr;
	struct igb_rss_conf_ele *rss_filter_ptr;
	struct igb_flow_mem *igb_flow_mem_ptr;
	enum rte_filter_type filter_type;
	struct rte_flow *pmd_flow;

	TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
		if (igb_flow_mem_ptr->dev == dev) {
			pmd_flow = igb_flow_mem_ptr->flow;
			filter_type = pmd_flow->filter_type;

			switch (filter_type) {
			case RTE_ETH_FILTER_NTUPLE:
				ntuple_filter_ptr =
					(struct igb_ntuple_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ntuple_list,
						ntuple_filter_ptr, entries);
				rte_free(ntuple_filter_ptr);
				break;
			case RTE_ETH_FILTER_ETHERTYPE:
				ethertype_filter_ptr =
					(struct igb_ethertype_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_ethertype_list,
						ethertype_filter_ptr, entries);
				rte_free(ethertype_filter_ptr);
				break;
			case RTE_ETH_FILTER_SYN:
				syn_filter_ptr =
					(struct igb_eth_syn_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_syn_list,
						syn_filter_ptr, entries);
				rte_free(syn_filter_ptr);
				break;
			case RTE_ETH_FILTER_FLEXIBLE:
				flex_filter_ptr =
					(struct igb_flex_filter_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_flex_list,
						flex_filter_ptr, entries);
				rte_free(flex_filter_ptr);
				break;
			case RTE_ETH_FILTER_HASH:
				rss_filter_ptr =
					(struct igb_rss_conf_ele *)
						pmd_flow->rule;
				TAILQ_REMOVE(&igb_filter_rss_list,
						rss_filter_ptr, entries);
				rte_free(rss_filter_ptr);
				break;
			default:
				PMD_DRV_LOG(WARNING, "Filter type "
					"(%d) not supported", filter_type);
				break;
			}
			TAILQ_REMOVE(&igb_flow_list,
				 igb_flow_mem_ptr,
				 entries);
			rte_free(igb_flow_mem_ptr->flow);
			rte_free(igb_flow_mem_ptr);
		}
	}
}

/* Destroy all flow rules associated with a port on igb. */
static int
igb_flow_flush(struct rte_eth_dev *dev,
		__rte_unused struct rte_flow_error *error)
{
	igb_clear_all_ntuple_filter(dev);
	igb_clear_all_ethertype_filter(dev);
	igb_clear_syn_filter(dev);
	igb_clear_all_flex_filter(dev);
	igb_clear_rss_filter(dev);
	igb_filterlist_flush(dev);

	return 0;
}

const struct rte_flow_ops igb_flow_ops = {
	.validate = igb_flow_validate,
	.create = igb_flow_create,
	.destroy = igb_flow_destroy,
	.flush = igb_flow_flush,
};