/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 */

#include "base/common.h"
#include "cxgbe_flow.h"

#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
do { \
        if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
                                          NULL, "Redefined match item with" \
                                          " different values found"); \
        (fs)->val.elem = (__v); \
        (fs)->mask.elem = (__m); \
} while (0)

#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
do { \
        memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
        memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
} while (0)

#define CXGBE_FILL_FS(v, m, elem) \
        __CXGBE_FILL_FS(v, m, fs, elem, e)

#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
        __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)

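/* Usage sketch (illustrative): inside a parser where "fs" and "e" are in
 * scope, a call like
 *
 *      CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
 *
 * expands to __CXGBE_FILL_FS(IPPROTO_UDP, 0xff, fs, proto, e); it fails the
 * flow parse if fs->val.proto was already set to a different value, and
 * otherwise records the value/mask pair in the filter specification.
 */
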
static int
cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
{
        /* rte_flow specification does not allow it. */
        if (!i->spec && (i->mask || i->last))
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          i, "last or mask given without spec");

        /* We don't support it.
         * Although we could accept last values of 0 or last == spec, that
         * would give the user no additional functionality and would only
         * increase the complexity for us.
         */
        if (i->last)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          i, "last is not supported by chelsio pmd");

        return 0;
}

static void
cxgbe_fill_filter_region(struct adapter *adap,
                         struct ch_filter_specification *fs)
{
        struct tp_params *tp = &adap->params.tp;
        u64 hash_filter_mask = tp->hash_filter_mask;
        u64 ntuple_mask = 0;

        fs->cap = 0;

        if (!is_hashfilter(adap))
                return;

        if (fs->type) {
                uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff,
                                     0xff, 0xff, 0xff, 0xff};
                uint8_t bitoff[16] = {0};

                if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, biton, sizeof(biton)))
                        return;
        } else {
                uint32_t biton = 0xffffffff;
                uint32_t bitoff = 0x0U;

                if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
                    !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
                    memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
                    memcmp(fs->mask.fip, &biton, sizeof(biton)))
                        return;
        }

        if (!fs->val.lport || fs->mask.lport != 0xffff)
                return;
        if (!fs->val.fport || fs->mask.fport != 0xffff)
                return;

        if (tp->protocol_shift >= 0)
                ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
        if (tp->ethertype_shift >= 0)
                ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
        if (tp->port_shift >= 0)
                ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
        if (tp->macmatch_shift >= 0)
                ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

        if (ntuple_mask != hash_filter_mask)
                return;

        fs->cap = 1;    /* use hash region */
}

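/* Note (explanatory): a flow can be placed in the hash (exact-match) region
 * only when every field the hardware hashes on is fully specified, i.e. both
 * IP addresses and both ports are exact matches, and the ntuple mask
 * composed above is exactly equal to the hash_filter_mask the firmware was
 * configured with. Anything less exact stays in the LE-TCAM region
 * (fs->cap == 0).
 */
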
static int
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *umask = item->mask;
        const struct rte_flow_item_eth *mask;

        /* If user has not given any mask, then use chelsio supported mask. */
        mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;

        /* we don't support SRC_MAC filtering */
        if (!is_zero_ether_addr(&mask->src))
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "src mac filtering not supported");

        if (!is_zero_ether_addr(&mask->dst)) {
                const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
                const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
                struct rte_flow *flow = (struct rte_flow *)fs->private;
                struct port_info *pi = (struct port_info *)
                                       (flow->dev->data->dev_private);
                int idx;

                idx = cxgbe_mpstcam_alloc(pi, addr, m);
                if (idx <= 0)
                        return rte_flow_error_set(e, idx,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unable to allocate mac"
                                                  " entry in h/w");
                CXGBE_FILL_FS(idx, 0x1ff, macidx);
        }

        CXGBE_FILL_FS(be16_to_cpu(spec->type),
                      be16_to_cpu(mask->type), ethtype);

        return 0;
}

static int
ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_phy_port *val = item->spec;
        const struct rte_flow_item_phy_port *umask = item->mask;
        const struct rte_flow_item_phy_port *mask;

        mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;

        if (val->index > 0x7)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "port index up to 0x7 is supported");

        CXGBE_FILL_FS(val->index, mask->index, iport);

        return 0;
}

static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_udp *val = item->spec;
        const struct rte_flow_item_udp *umask = item->mask;
        const struct rte_flow_item_udp *mask;

        mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;

        if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "udp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
                     struct rte_flow_error *e)
{
        const struct rte_flow_item_tcp *val = item->spec;
        const struct rte_flow_item_tcp *umask = item->mask;
        const struct rte_flow_item_tcp *mask;

        mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;

        if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
            mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
            mask->hdr.tcp_urp)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tcp: only src/dst port supported");

        CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
        if (!val)
                return 0;
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
                      be16_to_cpu(mask->hdr.src_port), fport);
        CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
                      be16_to_cpu(mask->hdr.dst_port), lport);
        return 0;
}

static int
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv4 *val = item->spec;
        const struct rte_flow_item_ipv4 *umask = item->mask;
        const struct rte_flow_item_ipv4 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;

        if (mask->hdr.time_to_live || mask->hdr.type_of_service)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item, "ttl/tos are not supported");

        fs->type = FILTER_TYPE_IPV4;
        CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv4 wild card */

        CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
                      struct ch_filter_specification *fs,
                      struct rte_flow_error *e)
{
        const struct rte_flow_item_ipv6 *val = item->spec;
        const struct rte_flow_item_ipv6 *umask = item->mask;
        const struct rte_flow_item_ipv6 *mask;

        mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;

        if (mask->hdr.vtc_flow ||
            mask->hdr.payload_len || mask->hdr.hop_limits)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
                                          "tc/flow/hop are not supported");

        fs->type = FILTER_TYPE_IPV6;
        CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
        if (!val)
                return 0; /* ipv6 wild card */

        CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
        CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
        CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);

        return 0;
}

static int
cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
                      struct rte_flow_error *e)
{
        if (attr->egress)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr,
                                          "attribute:<egress> is not supported");

        if (attr->group > 0)
                return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
                                          attr,
                                          "group parameter is not supported");

        flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;

        return 0;
}

static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
{
        struct port_info *pi = ethdev2pinfo(dev);

        if (rxq > pi->n_rx_qsets)
                return -EINVAL;
        return 0;
}

static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct ch_filter_specification fs = f->fs;

        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "invalid flow index %d.\n", fidx);
                return -ENOTSUP;
        }
        if (!is_filter_set(&adap->tids, fidx, fs.type)) {
                dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
                return -EINVAL;
        }

        return 0;
}

static int
cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
                         struct adapter *adap, unsigned int fidx)
{
        if (is_filter_set(&adap->tids, fidx, fs->type)) {
                dev_err(adap, "filter index: %d is busy.\n", fidx);
                return -EBUSY;
        }
        if (fidx >= adap->tids.nftids) {
                dev_err(adap, "filter index (%u) >= max(%u)\n",
                        fidx, adap->tids.nftids);
                return -ERANGE;
        }

        return 0;
}

static int
cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
{
        if (flow->fs.cap)
                return 0; /* Hash filters */
        return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
                     cxgbe_validate_fidxonadd(&flow->fs,
                                              ethdev2adap(flow->dev), fidx);
}

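/* Note (explanatory): hash-region filters are skipped above because their
 * index (the tid) is assigned by hardware at creation time rather than
 * chosen from the LE-TCAM index space, so there is nothing to validate.
 */
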
static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(flow->dev);

        /* For tcam get the next available slot, if default value specified */
        if (flow->fidx == FILTER_ID_MAX) {
                int idx;

                idx = cxgbe_alloc_ftid(adap, fs->type);
                if (idx < 0) {
                        dev_err(adap, "unable to get a filter index in tcam\n");
                        return -ENOMEM;
                }
                *fidx = (unsigned int)idx;
        } else {
                *fidx = flow->fidx;
        }

        return 0;
}

static int
cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
{
        const struct rte_flow_item *i;
        int j, index = -ENOENT;

        for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
                if (i->type == type) {
                        index = j;
                        break;
                }
        }

        return index;
}

static int
ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
{
        /* nmode:
         * BIT_0 = [src_ip],   BIT_1 = [dst_ip]
         * BIT_2 = [src_port], BIT_3 = [dst_port]
         *
         * Only below cases are supported as per our spec.
         */
        switch (nmode) {
        case 0:  /* 0000b */
                fs->nat_mode = NAT_MODE_NONE;
                break;
        case 2:  /* 0010b */
                fs->nat_mode = NAT_MODE_DIP;
                break;
        case 5:  /* 0101b */
                fs->nat_mode = NAT_MODE_SIP_SP;
                break;
        case 7:  /* 0111b */
                fs->nat_mode = NAT_MODE_DIP_SIP_SP;
                break;
        case 10: /* 1010b */
                fs->nat_mode = NAT_MODE_DIP_DP;
                break;
        case 11: /* 1011b */
                fs->nat_mode = NAT_MODE_DIP_DP_SIP;
                break;
        case 14: /* 1110b */
                fs->nat_mode = NAT_MODE_DIP_DP_SP;
                break;
        case 15: /* 1111b */
                fs->nat_mode = NAT_MODE_ALL;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

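/* Worked example (illustrative): a flow carrying the actions
 * RTE_FLOW_ACTION_TYPE_SET_IPV4_DST and RTE_FLOW_ACTION_TYPE_SET_TP_DST
 * accumulates nmode = BIT_1 | BIT_3 = 1010b = 10, which the switch above
 * maps to NAT_MODE_DIP_DP (rewrite destination IP and destination port).
 */
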
static int
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
                          const struct rte_flow_item items[],
                          uint8_t *nmode,
                          struct ch_filter_specification *fs,
                          struct rte_flow_error *e)
{
        const struct rte_flow_action_of_set_vlan_vid *vlanid;
        const struct rte_flow_action_of_push_vlan *pushvlan;
        const struct rte_flow_action_set_ipv4 *ipv4;
        const struct rte_flow_action_set_ipv6 *ipv6;
        const struct rte_flow_action_set_tp *tp_port;
        const struct rte_flow_action_phy_port *port;
        int item_index;

        switch (a->type) {
        case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
                         a->conf;
                fs->newvlan = VLAN_REWRITE;
                fs->vlan = vlanid->vlan_vid;
                break;
        case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                pushvlan = (const struct rte_flow_action_of_push_vlan *)
                           a->conf;
                if (pushvlan->ethertype != ETHER_TYPE_VLAN)
                        return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "only ethertype 0x8100 "
                                        "supported for push vlan.");
                fs->newvlan = VLAN_INSERT;
                break;
        case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                fs->newvlan = VLAN_REMOVE;
                break;
        case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                port = (const struct rte_flow_action_phy_port *)a->conf;
                fs->eport = port->index;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV4);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "No RTE_FLOW_ITEM_TYPE_IPV4 "
                                        "found");

                ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
                memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
                *nmode |= 1 << 0;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV4);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "No RTE_FLOW_ITEM_TYPE_IPV4 "
                                        "found");

                ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
                memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
                *nmode |= 1 << 1;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV6);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "No RTE_FLOW_ITEM_TYPE_IPV6 "
                                        "found");

                ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
                memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
                *nmode |= 1 << 0;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_IPV6);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "No RTE_FLOW_ITEM_TYPE_IPV6 "
                                        "found");

                ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
                memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
                *nmode |= 1 << 1;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_TCP);
                if (item_index < 0) {
                        item_index =
                                cxgbe_get_flow_item_index(items,
                                                RTE_FLOW_ITEM_TYPE_UDP);
                        if (item_index < 0)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "No RTE_FLOW_ITEM_TYPE_TCP or "
                                                "RTE_FLOW_ITEM_TYPE_UDP found");
                }

                tp_port = (const struct rte_flow_action_set_tp *)a->conf;
                fs->nat_fport = be16_to_cpu(tp_port->port);
                *nmode |= 1 << 2;
                break;
        case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_TCP);
                if (item_index < 0) {
                        item_index =
                                cxgbe_get_flow_item_index(items,
                                                RTE_FLOW_ITEM_TYPE_UDP);
                        if (item_index < 0)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "No RTE_FLOW_ITEM_TYPE_TCP or "
                                                "RTE_FLOW_ITEM_TYPE_UDP found");
                }

                tp_port = (const struct rte_flow_action_set_tp *)a->conf;
                fs->nat_lport = be16_to_cpu(tp_port->port);
                *nmode |= 1 << 3;
                break;
        case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
                item_index = cxgbe_get_flow_item_index(items,
                                                       RTE_FLOW_ITEM_TYPE_ETH);
                if (item_index < 0)
                        return rte_flow_error_set(e, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, a,
                                        "No RTE_FLOW_ITEM_TYPE_ETH "
                                        "found");
                fs->swapmac = 1;
                break;
        default:
                /* We are not supposed to come here */
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "Action not supported");
        }

        return 0;
}

static int
cxgbe_rtef_parse_actions(struct rte_flow *flow,
                         const struct rte_flow_item items[],
                         const struct rte_flow_action action[],
                         struct rte_flow_error *e)
{
        struct ch_filter_specification *fs = &flow->fs;
        uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
        const struct rte_flow_action_queue *q;
        const struct rte_flow_action *a;
        char abit = 0;
        int ret;

        for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
                switch (a->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_DROP;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        q = (const struct rte_flow_action_queue *)a->conf;
                        if (!q)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "specify rx queue index");
                        if (check_rxq(flow->dev, q->index))
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, q,
                                                "Invalid rx queue");
                        if (abit++)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "specify only 1 pass/drop");
                        fs->action = FILTER_PASS;
                        fs->dirsteer = 1;
                        fs->iq = q->index;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        fs->hitcnts = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                case RTE_FLOW_ACTION_TYPE_PHY_PORT:
                case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        nat_ipv4++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        nat_ipv6++;
                        goto action_switch;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
action_switch:
                        /* We allow multiple switch actions, but switch is
                         * not compatible with either queue or drop
                         */
                        if (abit++ && fs->action != FILTER_SWITCH)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "overlapping action specified");
                        if (nat_ipv4 && nat_ipv6)
                                return rte_flow_error_set(e, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION, a,
                                                "Can't have one address ipv4 "
                                                "and the other ipv6");

                        ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
                                                        e);
                        if (ret)
                                return ret;
                        fs->action = FILTER_SWITCH;
                        break;
                default:
                        /* Not supported action : return error */
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  a, "Action not supported");
                }
        }

        if (ch_rte_parse_nat(nmode, fs))
                return rte_flow_error_set(e, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, a,
                                          "invalid settings for switch action");

        return 0;
}

static struct chrte_fparse parseitem[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .fptr  = ch_rte_parsetype_eth,
                .dmask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .type = 0xffff,
                }
        },

        [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
                .fptr = ch_rte_parsetype_port,
                .dmask = &(const struct rte_flow_item_phy_port){
                        .index = 0x7,
                }
        },

        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .fptr  = ch_rte_parsetype_ipv4,
                .dmask = &rte_flow_item_ipv4_mask,
        },

        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .fptr  = ch_rte_parsetype_ipv6,
                .dmask = &rte_flow_item_ipv6_mask,
        },

        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .fptr  = ch_rte_parsetype_udp,
                .dmask = &rte_flow_item_udp_mask,
        },

        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .fptr  = ch_rte_parsetype_tcp,
                .dmask = &rte_flow_item_tcp_mask,
        },
};

static int
cxgbe_rtef_parse_items(struct rte_flow *flow,
                       const struct rte_flow_item items[],
                       struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        const struct rte_flow_item *i;
        char repeat[ARRAY_SIZE(parseitem)] = {0};

        for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
                struct chrte_fparse *idx;
                int ret;

                if (i->type >= ARRAY_SIZE(parseitem))
                        return rte_flow_error_set(e, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  i, "Item not supported");

                switch (i->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        continue;
                default:
                        /* check if item is repeated */
                        if (repeat[i->type])
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "parse items cannot be repeated (except void)");
                        repeat[i->type] = 1;

                        /* No spec found for this pattern item. Skip it */
                        if (!i->spec)
                                break;

                        /* validate the item */
                        ret = cxgbe_validate_item(i, e);
                        if (ret)
                                return ret;

                        idx = &flow->item_parser[i->type];
                        if (!idx || !idx->fptr) {
                                return rte_flow_error_set(e, ENOTSUP,
                                                RTE_FLOW_ERROR_TYPE_ITEM, i,
                                                "Item not supported");
                        } else {
                                ret = idx->fptr(idx->dmask, i, &flow->fs, e);
                                if (ret)
                                        return ret;
                        }
                }
        }

        cxgbe_fill_filter_region(adap, &flow->fs);

        return 0;
}

static int
cxgbe_flow_parse(struct rte_flow *flow,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item item[],
                 const struct rte_flow_action action[],
                 struct rte_flow_error *e)
{
        int ret;

        /* parse user request into ch_filter_specification */
        ret = cxgbe_rtef_parse_attr(flow, attr, e);
        if (ret)
                return ret;
        ret = cxgbe_rtef_parse_items(flow, item, e);
        if (ret)
                return ret;
        return cxgbe_rtef_parse_actions(flow, item, action, e);
}

static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(dev);
        struct tid_info *t = &adap->tids;
        struct filter_ctx ctx;
        unsigned int fidx;
        int err;

        if (cxgbe_get_fidx(flow, &fidx))
                return -ENOMEM;
        if (cxgbe_verify_fidx(flow, fidx, 0))
                return -1;

        t4_init_completion(&ctx.completion);
        /* go create the filter */
        err = cxgbe_set_filter(dev, fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while creating filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter set operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while creating the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        if (fs->cap) { /* to destroy the filter */
                flow->fidx = ctx.tid;
                flow->f = lookup_tid(t, ctx.tid);
        } else {
                flow->fidx = fidx;
                flow->f = &adap->tids.ftid_tab[fidx];
        }

        return 0;
}

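/* Note (explanatory): filter creation is asynchronous. cxgbe_set_filter()
 * posts the request to firmware, and the reply is collected by polling the
 * firmware event queue until ctx.completion fires. For hash-region filters
 * the hardware chooses the tid, which is recorded here so the flow can be
 * destroyed later; LE-TCAM filters keep the index chosen in cxgbe_get_fidx().
 */
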
static struct rte_flow *
cxgbe_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item item[],
                  const struct rte_flow_action action[],
                  struct rte_flow_error *e)
{
        struct rte_flow *flow;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow) {
                rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to allocate memory for"
                                   " filter_entry");
                return NULL;
        }

        flow->item_parser = parseitem;
        flow->dev = dev;
        flow->fs.private = (void *)flow;

        if (cxgbe_flow_parse(flow, attr, item, action, e)) {
                t4_os_free(flow);
                return NULL;
        }

        /* go, interact with cxgbe_filter */
        ret = __cxgbe_flow_create(dev, flow);
        if (ret) {
                rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unable to create flow rule");
                t4_os_free(flow);
                return NULL;
        }

        flow->f->private = flow; /* Will be used during flush */

        return flow;
}

static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct adapter *adap = ethdev2adap(dev);
        struct filter_entry *f = flow->f;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int err;

        if (cxgbe_verify_fidx(flow, flow->fidx, 1))
                return -1;

        fs = &f->fs;
        t4_init_completion(&ctx.completion);
        err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
        if (err) {
                dev_err(adap, "Error %d while deleting filter.\n", err);
                return err;
        }

        /* Poll the FW for reply */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
                                        CXGBE_FLOW_POLL_US,
                                        CXGBE_FLOW_POLL_CNT,
                                        &ctx.completion);
        if (err) {
                dev_err(adap, "Filter delete operation timed out (%d)\n", err);
                return err;
        }
        if (ctx.result) {
                dev_err(adap, "Hardware error %d while deleting the filter.\n",
                        ctx.result);
                return ctx.result;
        }

        if (fs->mask.macidx) {
                struct port_info *pi = (struct port_info *)
                                       (dev->data->dev_private);
                int ret;

                ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
                if (ret < 0) /* propagate mps-tcam removal failure */
                        return ret;
        }

        return 0;
}

static int
cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                   struct rte_flow_error *e)
{
        int ret;

        ret = __cxgbe_flow_destroy(dev, flow);
        if (ret)
                return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          flow, "error destroying filter.");
        t4_os_free(flow);
        return 0;
}

static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
                              u64 *byte_count)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        struct ch_filter_specification fs = flow->f->fs;
        unsigned int fidx = flow->fidx;
        int ret = 0;

        ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
        if (ret)
                return ret;
        return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
}

static int
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
                 const struct rte_flow_action *action, void *data,
                 struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(flow->dev);
        struct ch_filter_specification fs;
        struct rte_flow_query_count *c;
        struct filter_entry *f;
        int ret;

        RTE_SET_USED(dev);

        f = flow->f;
        fs = f->fs;

        if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
                return rte_flow_error_set(e, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "only count supported for query");

        /* This is a valid operation, since we are allowed to do chelsio
         * specific operations in rte side of our code but not vice-versa.
         *
         * So, fs can be queried/modified here BUT rte_flow_query_count
         * cannot be worked on by the lower layer since we want to maintain
         * it as rte_flow agnostic.
         */
        if (!fs.hitcnts)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                          &fs, "filter hit counters were not"
                                          " enabled during filter creation");

        c = (struct rte_flow_query_count *)data;
        ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
        if (ret)
                return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
                                          f, "cxgbe pmd failed to"
                                          " perform query");

        /* Query was successful */
        c->bytes_set = 1;
        c->hits_set = 1;
        if (c->reset)
                cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);

        return 0; /* success / partial_success */
}

static int
cxgbe_flow_validate(struct rte_eth_dev *dev,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item item[],
                    const struct rte_flow_action action[],
                    struct rte_flow_error *e)
{
        struct adapter *adap = ethdev2adap(dev);
        struct rte_flow *flow;
        unsigned int fidx;
        int ret;

        flow = t4_os_alloc(sizeof(struct rte_flow));
        if (!flow)
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "Unable to allocate memory for filter_entry");

        flow->item_parser = parseitem;
        flow->dev = dev;

        ret = cxgbe_flow_parse(flow, attr, item, action, e);
        if (ret) {
                t4_os_free(flow);
                return ret;
        }

        if (validate_filter(adap, &flow->fs)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL,
                                "validation failed. Check f/w config file.");
        }

        if (cxgbe_get_fidx(flow, &fidx)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "no memory in tcam.");
        }

        if (cxgbe_verify_fidx(flow, fidx, 0)) {
                t4_os_free(flow);
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                          NULL, "validation failed");
        }

        t4_os_free(flow);
        return 0;
}

/**
 * @ret : == 0 filter destroyed successfully
 *        < 0 error destroying filter
 *        == 1 filter not active / not found
 */
static int
cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
                      struct rte_flow_error *e)
{
        if (f && (f->valid || f->pending) &&
            f->dev == dev && /* Only if user has asked for this port */
            f->private) /* We (rte_flow) created this filter */
                return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
                                          e);
        return 1;
}

*dev
, struct rte_flow_error
*e
)
1064 struct adapter
*adap
= ethdev2adap(dev
);
1068 if (adap
->tids
.ftid_tab
) {
1069 struct filter_entry
*f
= &adap
->tids
.ftid_tab
[0];
1071 for (i
= 0; i
< adap
->tids
.nftids
; i
++, f
++) {
1072 ret
= cxgbe_check_n_destroy(f
, dev
, e
);
1078 if (is_hashfilter(adap
) && adap
->tids
.tid_tab
) {
1079 struct filter_entry
*f
;
1081 for (i
= adap
->tids
.hash_base
; i
<= adap
->tids
.ntids
; i
++) {
1082 f
= (struct filter_entry
*)adap
->tids
.tid_tab
[i
];
1084 ret
= cxgbe_check_n_destroy(f
, dev
, e
);
1091 return ret
>= 0 ? 0 : ret
;
static const struct rte_flow_ops cxgbe_flow_ops = {
        .validate = cxgbe_flow_validate,
        .create = cxgbe_flow_create,
        .destroy = cxgbe_flow_destroy,
        .flush = cxgbe_flow_flush,
        .query = cxgbe_flow_query,
        .isolate = NULL,
};

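/* Entry point (explanatory): cxgbe_dev_filter_ctrl() below hands this ops
 * table to the ethdev layer for RTE_ETH_FILTER_GENERIC queries, which is how
 * the rte_flow API reaches the callbacks defined in this file.
 */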
int
cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                      enum rte_filter_type filter_type,
                      enum rte_filter_op filter_op,
                      void *arg)
{
        int ret = 0;

        RTE_SET_USED(dev);
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &cxgbe_flow_ops;
                break;
        default:
                ret = -ENOTSUP;
                break;
        }
        return ret;
}