// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"

/* Egress rate limiting definitions */
#define MAX_BURST_EXPONENT		0x0FULL
#define MAX_BURST_MANTISSA		0xFFULL
#define MAX_BURST_SIZE			130816ULL
#define MAX_RATE_DIVIDER_EXPONENT	12ULL
#define MAX_RATE_EXPONENT		0x0FULL
#define MAX_RATE_MANTISSA		0xFFULL

/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA		GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT		GENMASK_ULL(12, 9)
#define TLX_RATE_DIVIDER_EXPONENT	GENMASK_ULL(16, 13)
#define TLX_BURST_MANTISSA		GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT		GENMASK_ULL(40, 37)
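
/* NIX_AF_TL4X_PIR register layout implied by the masks above (bit 0 is
 * ORed in separately when the shaper is programmed, see
 * otx2_set_matchall_egress_rate() below):
 *
 *   [40:37] burst exponent
 *   [36:29] burst mantissa
 *   [16:13] rate divider exponent
 *   [12:9]  rate exponent
 *   [8:1]   rate mantissa
 */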

struct otx2_tc_flow_stats {
	u64 bytes;
	u64 pkts;
	u64 used;
};

struct otx2_tc_flow {
	struct rhash_head		node;
	unsigned long			cookie;
	unsigned int			bitpos;
	struct rcu_head			rcu;
	struct otx2_tc_flow_stats	stats;
	spinlock_t			lock; /* lock for stats */
	u16				rq;
	u16				entry;
	u16				leaf_profile;
	bool				is_act_police;
};

int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
		return 0;

	/* Max flows changed, free the existing bitmap */
	kfree(tc->tc_entries_bitmap);

	tc->tc_entries_bitmap =
			kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
				sizeof(long), GFP_KERNEL);
	if (!tc->tc_entries_bitmap) {
		netdev_err(nic->netdev,
			   "Unable to alloc TC flow entries bitmap\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);

static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
				      u32 *burst_mantissa)
{
	unsigned int tmp;

	/* Burst is calculated as
	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
	 * Max supported burst size is 130,816 bytes.
	 */
	burst = min_t(u32, burst, MAX_BURST_SIZE);
	if (burst) {
		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
		tmp = burst - rounddown_pow_of_two(burst);
		if (burst < MAX_BURST_MANTISSA)
			*burst_mantissa = tmp * 2;
		else
			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
	} else {
		*burst_exp = MAX_BURST_EXPONENT;
		*burst_mantissa = MAX_BURST_MANTISSA;
	}
}
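
/* Worked example for the encoding above: burst = 6144 bytes gives
 * *burst_exp = ilog2(6144) - 1 = 11 and tmp = 6144 - 4096 = 2048, so
 * *burst_mantissa = 2048 / (1ULL << (11 - 7)) = 128; the hardware then
 * reconstructs ((256 + 128) << (1 + 11)) / 256 = 6144 bytes exactly.
 */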

static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
				     u32 *mantissa, u32 *div_exp)
{
	unsigned int tmp;

	/* Rate calculation by hardware
	 *
	 * PIR_ADD = ((256 + mantissa) << exp) / 256
	 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
	 * The resultant rate is in Mbps.
	 */

	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
	 * Setting this to '0' will ease the calculation of
	 * exponent and mantissa.
	 */
	*div_exp = 0;

	if (maxrate) {
		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
		tmp = maxrate - rounddown_pow_of_two(maxrate);
		if (maxrate < MAX_RATE_MANTISSA)
			*mantissa = tmp * 2;
		else
			*mantissa = tmp / (1ULL << (*exp - 7));
	} else {
		/* Instead of disabling rate limiting, set all values to max */
		*exp = MAX_RATE_EXPONENT;
		*mantissa = MAX_RATE_MANTISSA;
	}
}
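
/* Worked example for the encoding above: maxrate = 300 Mbps gives
 * *exp = ilog2(300) - 1 = 7 and tmp = 300 - 256 = 44, so
 * *mantissa = 44 / (1ULL << (7 - 7)) = 44; the hardware reconstructs
 * PIR_ADD = ((256 + 44) << 7) / 256 = 150 and rate = 2 * 150 = 300 Mbps.
 * With div_exp = 0 the encodable range spans 2 Mbps (exp = 0, mantissa = 0)
 * to ~130.8 Gbps (exp = 15, mantissa = 255), hence the comment above about
 * covering 2 Mbps - 100 Gbps with div_exp = 0.
 */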

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
{
	struct otx2_hw *hw = &nic->hw;
	struct nix_txschq_config *req;
	u32 burst_exp, burst_mantissa;
	u32 exp, mantissa, div_exp;
	int txschq, err;

	/* All SQs share the same TL4, so pick the first scheduler */
	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

	/* Get exponent and mantissa values from the desired rate */
	otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_TL4;
	req->num_regs = 1;
	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
	req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
			 FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
			 FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
			 FIELD_PREP(TLX_RATE_EXPONENT, exp) |
			 FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
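
	/* For the worked examples above (burst = 6144 => exponent 11,
	 * mantissa 128; maxrate = 300 => exponent 7, mantissa 44;
	 * div_exp = 0) this packs to
	 * (11 << 37) | (128 << 29) | (7 << 9) | (44 << 1) | 1 = 0x17000000e59.
	 */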

	err = otx2_sync_mbox_msg(&nic->mbox);
	mutex_unlock(&nic->mbox.lock);
	return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
				 struct flow_action *actions,
				 struct netlink_ext_ack *extack)
{
	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload supports only 1 policing action");
		return -EINVAL;
	}

	return 0;
}

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u32 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one Egress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		if (entry->police.rate_pkt_ps) {
			NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets per second");
			return -EOPNOTSUPP;
		}
		/* Convert bytes per second to Mbps */
		rate = entry->police.rate_bytes_ps * 8;
		rate = max_t(u32, rate / 1000000, 1);
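		/* e.g. rate_bytes_ps = 25,000,000 (25 MB/s) becomes
		 * 200,000,000 bits/s and then 200 Mbps; max_t() rounds
		 * sub-1Mbps requests up to 1 Mbps. Note the arithmetic is
		 * done in a u32, so the intermediate bits/s value overflows
		 * the type near 4.29e9 (~537 MB/s).
		 */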
		err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action is supported with Egress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = otx2_set_matchall_egress_rate(nic, 0, 0);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
	return err;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
				  struct otx2_tc_flow *node,
				  struct flow_cls_offload *f,
				  u64 rate, u32 burst, u32 mark,
				  struct npc_install_flow_req *req, bool pps)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct otx2_hw *hw = &nic->hw;
	int rq_idx, rc;

	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
	if (rq_idx >= hw->rx_queues) {
		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
	if (rc) {
		mutex_unlock(&nic->mbox.lock);
		return rc;
	}

	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
	if (rc)
		goto free_leaf;

	rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
	if (rc)
		goto free_leaf;

	mutex_unlock(&nic->mbox.lock);

	req->match_id = mark & 0xFFFFULL;
	req->index = rq_idx;
	req->op = NIX_RX_ACTIONOP_UCAST;
	set_bit(rq_idx, &nic->rq_bmap);
	node->is_act_police = true;
	node->rq = rq_idx;

	return 0;

free_leaf:
	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
		netdev_err(nic->netdev,
			   "Unable to free leaf bandwidth profile(%d)\n",
			   node->leaf_profile);
	mutex_unlock(&nic->mbox.lock);

	return rc;
}

static int otx2_tc_parse_actions(struct otx2_nic *nic,
				 struct flow_action *flow_action,
				 struct npc_install_flow_req *req,
				 struct flow_cls_offload *f,
				 struct otx2_tc_flow *node)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_action_entry *act;
	struct net_device *target;
	struct otx2_nic *priv;
	u32 burst, mark = 0;
	u8 nr_police = 0;
	bool pps = false;
	u64 rate;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
		return -EINVAL;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			req->op = NIX_RX_ACTIONOP_DROP;
			return 0;
		case FLOW_ACTION_ACCEPT:
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_REDIRECT_INGRESS:
			target = act->dev;
			priv = netdev_priv(target);
			/* npc_install_flow_req doesn't support passing a target pcifunc */
			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't redirect to other pf/vf");
				return -EOPNOTSUPP;
			}
			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
			req->op = NIX_RX_ACTION_DEFAULT;
			return 0;
		case FLOW_ACTION_VLAN_POP:
			req->vtag0_valid = true;
			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
			break;
		case FLOW_ACTION_POLICE:
			/* Ingress ratelimiting is not supported on OcteonTx2 */
			if (is_dev_otx2(nic->pdev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Ingress policing not supported on this platform");
				return -EOPNOTSUPP;
			}

			if (act->police.rate_bytes_ps > 0) {
				rate = act->police.rate_bytes_ps * 8;
				burst = act->police.burst;
			} else if (act->police.rate_pkt_ps > 0) {
				/* The algorithm used to calculate rate
				 * mantissa and exponent values for a given
				 * token rate (the token can be a byte or a
				 * packet) requires the token rate to be
				 * multiplied by 8.
				 */
				rate = act->police.rate_pkt_ps * 8;
				burst = act->police.burst_pkt;
				pps = true;
			}
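			/* e.g. a 1000 pkt/s police rate is handed to
			 * cn10k_set_ipolicer_rate() as 8000 with pps = true,
			 * mirroring the byte-rate path above where
			 * rate_bytes_ps is likewise scaled by 8 (bytes to
			 * bits per second).
			 */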
			nr_police++;
			break;
		case FLOW_ACTION_MARK:
			mark = act->mark;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (nr_police > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "rate limit police offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (nr_police)
		return otx2_tc_act_set_police(nic, node, f, rate, burst,
					      mark, req, pps);

	return 0;
}

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
				struct flow_cls_offload *f,
				struct npc_install_flow_req *req)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_msg *flow_spec = &req->packet;
	struct flow_msg *flow_mask = &req->mask;
	struct flow_dissector *dissector;
	struct flow_rule *rule;
	u8 ip_proto = 0;

	rule = flow_cls_offload_flow_rule(f);
	dissector = rule->match.dissector;

	if ((dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_IP)))) {
		netdev_info(nic->netdev, "unsupported flow used key 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		/* All EtherTypes can be matched, no hw limitation */
		flow_spec->etype = match.key->n_proto;
		flow_mask->etype = match.mask->n_proto;
		req->features |= BIT_ULL(NPC_ETYPE);

		if (match.mask->ip_proto &&
		    (match.key->ip_proto != IPPROTO_TCP &&
		     match.key->ip_proto != IPPROTO_UDP &&
		     match.key->ip_proto != IPPROTO_SCTP &&
		     match.key->ip_proto != IPPROTO_ICMP &&
		     match.key->ip_proto != IPPROTO_ICMPV6)) {
			netdev_info(nic->netdev,
				    "ip_proto=0x%x not supported\n",
				    match.key->ip_proto);
			return -EOPNOTSUPP;
		}
		if (match.mask->ip_proto)
			ip_proto = match.key->ip_proto;

		if (ip_proto == IPPROTO_UDP)
			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
		else if (ip_proto == IPPROTO_TCP)
			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
		else if (ip_proto == IPPROTO_SCTP)
			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
		else if (ip_proto == IPPROTO_ICMP)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
		else if (ip_proto == IPPROTO_ICMPV6)
			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		if (!is_zero_ether_addr(match.mask->src)) {
			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
			return -EOPNOTSUPP;
		}

		if (!is_zero_ether_addr(match.mask->dst)) {
			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
			ether_addr_copy(flow_mask->dmac,
					(u8 *)&match.mask->dst);
			req->features |= BIT_ULL(NPC_DMAC);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
		    match.mask->tos) {
			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
			return -EOPNOTSUPP;
		}
		if (match.mask->ttl) {
			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
			return -EOPNOTSUPP;
		}
		flow_spec->tos = match.key->tos;
		flow_mask->tos = match.mask->tos;
		req->features |= BIT_ULL(NPC_TOS);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);

		if (ntohs(match.key->vlan_tpid) != ETH_P_8021Q) {
			netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
				   ntohs(match.key->vlan_tpid));
			return -EOPNOTSUPP;
		}

		if (match.mask->vlan_id ||
		    match.mask->vlan_dei ||
		    match.mask->vlan_priority) {
			vlan_tci = match.key->vlan_id |
				   match.key->vlan_dei << 12 |
				   match.key->vlan_priority << 13;

			vlan_tci_mask = match.mask->vlan_id |
					match.mask->vlan_dei << 12 |
					match.mask->vlan_priority << 13;

			flow_spec->vlan_tci = htons(vlan_tci);
			flow_mask->vlan_tci = htons(vlan_tci_mask);
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}
	}
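
	/* The TCI layout built above follows 802.1Q: PCP in bits 15:13, DEI
	 * in bit 12 and VLAN ID in bits 11:0; e.g. vlan_id = 5, vlan_dei = 0,
	 * vlan_priority = 3 packs to 5 | (0 << 12) | (3 << 13) = 0x6005.
	 */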

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);

		flow_spec->ip4dst = match.key->dst;
		flow_mask->ip4dst = match.mask->dst;
		req->features |= BIT_ULL(NPC_DIP_IPV4);

		flow_spec->ip4src = match.key->src;
		flow_mask->ip4src = match.mask->src;
		req->features |= BIT_ULL(NPC_SIP_IPV4);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow matching IPv6 loopback addr not supported");
			return -EOPNOTSUPP;
		}

		if (!ipv6_addr_any(&match.mask->dst)) {
			memcpy(&flow_spec->ip6dst,
			       (struct in6_addr *)&match.key->dst,
			       sizeof(flow_spec->ip6dst));
			memcpy(&flow_mask->ip6dst,
			       (struct in6_addr *)&match.mask->dst,
			       sizeof(flow_spec->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}

		if (!ipv6_addr_any(&match.mask->src)) {
			memcpy(&flow_spec->ip6src,
			       (struct in6_addr *)&match.key->src,
			       sizeof(flow_spec->ip6src));
			memcpy(&flow_mask->ip6src,
			       (struct in6_addr *)&match.mask->src,
			       sizeof(flow_spec->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);

		flow_spec->dport = match.key->dst;
		flow_mask->dport = match.mask->dst;

		if (flow_mask->dport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}

		flow_spec->sport = match.key->src;
		flow_mask->sport = match.mask->src;

		if (flow_mask->sport) {
			if (ip_proto == IPPROTO_UDP)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (ip_proto == IPPROTO_TCP)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else if (ip_proto == IPPROTO_SCTP)
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
	}

	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
			   entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}
	mutex_unlock(&nic->mbox.lock);

	return 0;
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
			   tc_flow_cmd->cookie);
		return -EINVAL;
	}

	if (flow_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
						 flow_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   flow_node->rq, flow_node->leaf_profile);

		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   flow_node->leaf_profile);

		__clear_bit(flow_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	otx2_del_mcam_flow_entry(nic, flow_node->entry);

	WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
				       &flow_node->node,
				       nic->tc_info.flow_ht_params));

	clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
	flow_cfg->nr_flows--;

	/* Free the node only after its last field accesses above */
	kfree_rcu(flow_node, rcu);

	return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
			    struct flow_cls_offload *tc_flow_cmd)
{
	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct otx2_tc_flow *new_node, *old_node;
	struct npc_install_flow_req *req, dummy;
	int rc, err;

	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		return -ENOMEM;

	if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Free MCAM entry not available to add the flow");
		return -ENOMEM;
	}

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node)
		return -ENOMEM;
	spin_lock_init(&new_node->lock);
	new_node->cookie = tc_flow_cmd->cookie;

	memset(&dummy, 0, sizeof(struct npc_install_flow_req));

	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
	if (rc) {
		kfree_rcu(new_node, rcu);
		return rc;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		otx2_tc_del_flow(nic, tc_flow_cmd);

	mutex_lock(&nic->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		rc = -ENOMEM;
		goto free_leaf;
	}
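
	/* The flow was staged in the on-stack 'dummy' before the mbox message
	 * existed; keep the message header initialized by the alloc call
	 * above and overwrite only the body with the staged request.
	 */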
	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));

	new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
					       flow_cfg->max_flows);
	req->channel = nic->hw.rx_chan_base;
	req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
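	/* Note: bitpos 0 maps to the highest-indexed entry of the reserved
	 * flow_ent[] range, i.e. TC flows consume the range back to front.
	 */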
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	new_node->entry = req->entry;

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
		mutex_unlock(&nic->mbox.lock);
		kfree_rcu(new_node, rcu);
		goto free_leaf;
	}
	mutex_unlock(&nic->mbox.lock);

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&nic->tc_info.flow_table, &new_node->node,
				    nic->tc_info.flow_ht_params);
	if (rc) {
		otx2_del_mcam_flow_entry(nic, req->entry);
		kfree_rcu(new_node, rcu);
		goto free_leaf;
	}

	set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
	flow_cfg->nr_flows++;

	return 0;

free_leaf:
	if (new_node->is_act_police) {
		mutex_lock(&nic->mbox.lock);

		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
						 new_node->leaf_profile, false);
		if (err)
			netdev_err(nic->netdev,
				   "Unmapping RQ %d & profile %d failed\n",
				   new_node->rq, new_node->leaf_profile);
		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
		if (err)
			netdev_err(nic->netdev,
				   "Unable to free leaf bandwidth profile(%d)\n",
				   new_node->leaf_profile);

		__clear_bit(new_node->rq, &nic->rq_bmap);

		mutex_unlock(&nic->mbox.lock);
	}

	return rc;
}

static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
				  struct flow_cls_offload *tc_flow_cmd)
{
	struct otx2_tc_info *tc_info = &nic->tc_info;
	struct npc_mcam_get_stats_req *req;
	struct npc_mcam_get_stats_rsp *rsp;
	struct otx2_tc_flow_stats *stats;
	struct otx2_tc_flow *flow_node;
	int err;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	mutex_lock(&nic->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
	if (!req) {
		mutex_unlock(&nic->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_node->entry;

	err = otx2_sync_mbox_msg(&nic->mbox);
	if (err) {
		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
			   req->entry);
		mutex_unlock(&nic->mbox.lock);
		return -EFAULT;
	}

	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
		(&nic->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		mutex_unlock(&nic->mbox.lock);
		return PTR_ERR(rsp);
	}

	mutex_unlock(&nic->mbox.lock);

	if (!rsp->stat_ena)
		return -EINVAL;

	stats = &flow_node->stats;
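
	/* The MCAM hit counter reported by the AF is cumulative; report only
	 * the packets seen since the previous query and cache the raw value,
	 * e.g. a first read of 100 followed by a later read of 150 reports
	 * 100 and then 50 to the TC core.
	 */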
	spin_lock(&flow_node->lock);
	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);
	stats->pkts = rsp->stat;
	spin_unlock(&flow_node->lock);

	return 0;
}

static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return otx2_tc_add_flow(nic, cls_flower);
	case FLOW_CLS_DESTROY:
		return otx2_tc_del_flow(nic, cls_flower);
	case FLOW_CLS_STATS:
		return otx2_tc_get_flow_stats(nic, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
					    struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct flow_action_entry *entry;
	u64 rate;
	int err;

	err = otx2_tc_validate_flow(nic, actions, extack);
	if (err)
		return err;

	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only one ingress MATCHALL ratelimiter can be offloaded");
		return -ENOMEM;
	}

	entry = &cls->rule->action.entries[0];
	switch (entry->id) {
	case FLOW_ACTION_POLICE:
		/* Ingress ratelimiting is not supported on OcteonTx2 */
		if (is_dev_otx2(nic->pdev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Ingress policing not supported on this platform");
			return -EOPNOTSUPP;
		}

		err = cn10k_alloc_matchall_ipolicer(nic);
		if (err)
			return err;

		/* Convert to bits per second */
		rate = entry->police.rate_bytes_ps * 8;
		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
		if (err)
			return err;
		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "Only police action supported with Ingress MATCHALL offload");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
					   struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	int err;

	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
		return -EINVAL;
	}

	err = cn10k_free_matchall_ipolicer(nic);
	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
	return err;
}

static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
					  struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(nic, type_data);
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_ingress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
					 struct tc_cls_matchall_offload *cls_matchall)
{
	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return otx2_tc_egress_matchall_install(nic, cls_matchall);
	case TC_CLSMATCHALL_DESTROY:
		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
	case TC_CLSMATCHALL_STATS:
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct otx2_nic *nic = cb_priv;

	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return otx2_setup_tc_egress_matchall(nic, type_data);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static LIST_HEAD(otx2_block_cb_list);

static int otx2_setup_tc_block(struct net_device *netdev,
			       struct flow_block_offload *f)
{
	struct otx2_nic *nic = netdev_priv(netdev);
	flow_setup_cb_t *cb;
	bool ingress;

	if (f->block_shared)
		return -EOPNOTSUPP;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
		cb = otx2_setup_tc_block_ingress_cb;
		ingress = true;
	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = otx2_setup_tc_block_egress_cb;
		ingress = false;
	} else {
		return -EOPNOTSUPP;
	}

	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
					  nic, nic, ingress);
}
*netdev
, enum tc_setup_type type
,
1026 case TC_SETUP_BLOCK
:
1027 return otx2_setup_tc_block(netdev
, type_data
);

static const struct rhashtable_params tc_flow_ht_params = {
	.head_offset = offsetof(struct otx2_tc_flow, node),
	.key_offset = offsetof(struct otx2_tc_flow, cookie),
	.key_len = sizeof(((struct otx2_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int otx2_init_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;
	int err;

	/* Exclude receive queue 0 from being used for the police action */
	set_bit(0, &nic->rq_bmap);

	if (!nic->flow_cfg) {
		netdev_err(nic->netdev,
			   "Can't init TC, nic->flow_cfg is not setup\n");
		return -EINVAL;
	}

	err = otx2_tc_alloc_ent_bitmap(nic);
	if (err)
		return err;

	tc->flow_ht_params = tc_flow_ht_params;
	return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}

void otx2_shutdown_tc(struct otx2_nic *nic)
{
	struct otx2_tc_info *tc = &nic->tc_info;

	kfree(tc->tc_entries_bitmap);
	rhashtable_destroy(&tc->flow_table);
}