// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */

#include <linux/module.h>

#include "rvu_struct.h"
#include "lmac_common.h"

static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u16 pcifunc);
enum nix_makr_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
};
/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct hlist_node node;
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first nix block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}
int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	return 0;
}
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;

	return idx;
}
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}

	return NULL;
}
u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	switch (dwrr_mtu) {
	case 4:
		return 9728;
	case 5:
		return 10240;
	default:
		return BIT_ULL(dwrr_mtu);
	}
}

u32 convert_bytes_to_dwrr_mtu(u32 bytes)
{
	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
	 * Value of 4 is reserved for MTU value of 9728 bytes.
	 * Value of 5 is reserved for MTU value of 10240 bytes.
	 */
	if (bytes > BIT_ULL(16))
		return 0;

	switch (bytes) {
	case 9728:
		return 4;
	case 10240:
		return 5;
	default:
		return ilog2(bytes);
	}
}
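/* Worked example of the encoding above (illustrative, derived from the
 * comment): dwrr_mtu 10 encodes BIT_ULL(10) = 1024 bytes and dwrr_mtu 16
 * encodes 64K bytes, while the reserved values 4 and 5 stand for 9728 and
 * 10240 byte MTUs instead of 16 and 32 bytes. convert_bytes_to_dwrr_mtu()
 * is the inverse mapping.
 */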
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");

	/* SW_SYNC ensures all existing transactions are finished and pkts
	 * are written to LLC/DRAM, queues should be torn down after
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might end after SW_SYNC operation. To
	 * ensure operation is fully done, do the SW_SYNC twice.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
			      struct nix_lf_alloc_rsp *rsp, bool loop)
{
	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u16 req_chan_base, req_chan_end, req_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	struct sdp_node_info *sdp_info;
	int pkind, pf, vf, lbkid, vfid;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	bool from_vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
	    type != NIX_INTF_TYPE_SDP)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;

		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
								    rvu),
						      lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* By default NIX0 is configured to send packet on lbk link 1
		 * (which corresponds to LBK1), same packet will receive on
		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to LBK1 block, back to back connectivity between NIX and
		 * LBK can be achieved (which is similar to 96xx)
		 *
		 *			RX		TX
		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
		 */
		if (loop)
			lbkid = !lbkid;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rsp->tx_link = hw->cgx_links + lbkid;

		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
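		/* Illustration of the pairing above: AF VF0 (vf = 0) receives
		 * on loopback channel 0 and transmits on channel 1, while
		 * VF1 (vf = 1) receives on channel 1 and transmits on
		 * channel 0, so the two VFs of a pair are connected back to
		 * back over consecutive channels.
		 */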
	case NIX_INTF_TYPE_SDP:
		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
		sdp_info = parent_pf->sdp_info;
		if (!sdp_info) {
			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
			return -EINVAL;
		}
		if (from_vf) {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
					sdp_info->num_pf_rings;
			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
			for (vfid = 0; vfid < vf; vfid++)
				req_chan_base += sdp_info->vf_rings[vfid];
			req_chan_cnt = sdp_info->vf_rings[vf];
			req_chan_end = req_chan_base + req_chan_cnt - 1;
			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
				dev_err(rvu->dev,
					"PF_Func 0x%x: Invalid channel base and count\n",
					pcifunc);
				return -EINVAL;
			}
		} else {
			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
			req_chan_cnt = sdp_info->num_pf_rings;
		}

		pfvf->rx_chan_base = req_chan_base;
		pfvf->rx_chan_cnt = req_chan_cnt;
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;

		rsp->tx_link = hw->cgx_links + hw->lbk_links;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base,
					      pfvf->rx_chan_cnt);
		break;
	}
	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}
	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err)
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	/* Disable DMAC filters used */
	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}
500 static int rvu_nix_get_bpid(struct rvu
*rvu
, struct nix_bp_cfg_req
*req
,
501 int type
, int chan_id
)
503 int bpid
, blkaddr
, lmac_chan_cnt
, sdp_chan_cnt
;
504 u16 cgx_bpid_cnt
, lbk_bpid_cnt
, sdp_bpid_cnt
;
505 struct rvu_hwinfo
*hw
= rvu
->hw
;
506 struct rvu_pfvf
*pfvf
;
510 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, req
->hdr
.pcifunc
);
511 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST
);
512 lmac_chan_cnt
= cfg
& 0xFF;
514 cgx_bpid_cnt
= hw
->cgx_links
* lmac_chan_cnt
;
515 lbk_bpid_cnt
= hw
->lbk_links
* ((cfg
>> 16) & 0xFF);
517 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST1
);
518 sdp_chan_cnt
= cfg
& 0xFFF;
519 sdp_bpid_cnt
= hw
->sdp_links
* sdp_chan_cnt
;
521 pfvf
= rvu_get_pfvf(rvu
, req
->hdr
.pcifunc
);
	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
534 case NIX_INTF_TYPE_CGX
:
535 if ((req
->chan_base
+ req
->chan_cnt
) > 15)
537 rvu_get_cgx_lmac_id(pfvf
->cgx_lmac
, &cgx_id
, &lmac_id
);
538 /* Assign bpid based on cgx, lmac and chan id */
539 bpid
= (cgx_id
* hw
->lmac_per_cgx
* lmac_chan_cnt
) +
540 (lmac_id
* lmac_chan_cnt
) + req
->chan_base
;
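		/* Example of the bpid computation above, assuming 16 channels
		 * per LMAC and 4 LMACs per CGX as in the mapping comment:
		 * cgx 1, lmac 0, chan_base 0 -> bpid = 1 * 4 * 16 = 64,
		 * matching cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79).
		 */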
542 if (req
->bpid_per_chan
)
544 if (bpid
> cgx_bpid_cnt
)
548 case NIX_INTF_TYPE_LBK
:
549 if ((req
->chan_base
+ req
->chan_cnt
) > 63)
551 bpid
= cgx_bpid_cnt
+ req
->chan_base
;
552 if (req
->bpid_per_chan
)
554 if (bpid
> (cgx_bpid_cnt
+ lbk_bpid_cnt
))
557 case NIX_INTF_TYPE_SDP
:
558 if ((req
->chan_base
+ req
->chan_cnt
) > 255)
561 bpid
= sdp_bpid_cnt
+ req
->chan_base
;
562 if (req
->bpid_per_chan
)
565 if (bpid
> (cgx_bpid_cnt
+ lbk_bpid_cnt
+ sdp_bpid_cnt
))
574 int rvu_mbox_handler_nix_bp_enable(struct rvu
*rvu
,
575 struct nix_bp_cfg_req
*req
,
576 struct nix_bp_cfg_rsp
*rsp
)
578 int blkaddr
, pf
, type
, chan_id
= 0;
579 u16 pcifunc
= req
->hdr
.pcifunc
;
580 struct rvu_pfvf
*pfvf
;
585 pf
= rvu_get_pf(pcifunc
);
586 type
= is_afvf(pcifunc
) ? NIX_INTF_TYPE_LBK
: NIX_INTF_TYPE_CGX
;
587 if (is_sdp_pfvf(pcifunc
))
588 type
= NIX_INTF_TYPE_SDP
;
590 /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
591 if (!is_pf_cgxmapped(rvu
, pf
) && type
!= NIX_INTF_TYPE_LBK
&&
592 type
!= NIX_INTF_TYPE_SDP
)
595 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
596 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
598 bpid_base
= rvu_nix_get_bpid(rvu
, req
, type
, chan_id
);
599 chan_base
= pfvf
->rx_chan_base
+ req
->chan_base
;
602 for (chan
= chan_base
; chan
< (chan_base
+ req
->chan_cnt
); chan
++) {
604 dev_warn(rvu
->dev
, "Fail to enable backpressure\n");
608 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_RX_CHANX_CFG(chan
));
609 cfg
&= ~GENMASK_ULL(8, 0);
610 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_CHANX_CFG(chan
),
611 cfg
| (bpid
& GENMASK_ULL(8, 0)) | BIT_ULL(16));
613 bpid
= rvu_nix_get_bpid(rvu
, req
, type
, chan_id
);
616 for (chan
= 0; chan
< req
->chan_cnt
; chan
++) {
617 /* Map channel and bpid assign to it */
618 rsp
->chan_bpid
[chan
] = ((req
->chan_base
+ chan
) & 0x7F) << 10 |
620 if (req
->bpid_per_chan
)
623 rsp
->chan_cnt
= req
->chan_cnt
;
628 static void nix_setup_lso_tso_l3(struct rvu
*rvu
, int blkaddr
,
629 u64 format
, bool v4
, u64
*fidx
)
631 struct nix_lso_format field
= {0};
633 /* IP's Length field */
634 field
.layer
= NIX_TXLAYER_OL3
;
635 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
636 field
.offset
= v4
? 2 : 4;
637 field
.sizem1
= 1; /* i.e 2 bytes */
638 field
.alg
= NIX_LSOALG_ADD_PAYLEN
;
639 rvu_write64(rvu
, blkaddr
,
640 NIX_AF_LSO_FORMATX_FIELDX(format
, (*fidx
)++),
643 /* No ID field in IPv6 header */
648 field
.layer
= NIX_TXLAYER_OL3
;
650 field
.sizem1
= 1; /* i.e 2 bytes */
651 field
.alg
= NIX_LSOALG_ADD_SEGNUM
;
652 rvu_write64(rvu
, blkaddr
,
653 NIX_AF_LSO_FORMATX_FIELDX(format
, (*fidx
)++),
657 static void nix_setup_lso_tso_l4(struct rvu
*rvu
, int blkaddr
,
658 u64 format
, u64
*fidx
)
660 struct nix_lso_format field
= {0};
662 /* TCP's sequence number field */
663 field
.layer
= NIX_TXLAYER_OL4
;
665 field
.sizem1
= 3; /* i.e 4 bytes */
666 field
.alg
= NIX_LSOALG_ADD_OFFSET
;
667 rvu_write64(rvu
, blkaddr
,
668 NIX_AF_LSO_FORMATX_FIELDX(format
, (*fidx
)++),
671 /* TCP's flags field */
672 field
.layer
= NIX_TXLAYER_OL4
;
674 field
.sizem1
= 1; /* 2 bytes */
675 field
.alg
= NIX_LSOALG_TCP_FLAGS
;
676 rvu_write64(rvu
, blkaddr
,
677 NIX_AF_LSO_FORMATX_FIELDX(format
, (*fidx
)++),
681 static void nix_setup_lso(struct rvu
*rvu
, struct nix_hw
*nix_hw
, int blkaddr
)
683 u64 cfg
, idx
, fidx
= 0;
685 /* Get max HW supported format indices */
686 cfg
= (rvu_read64(rvu
, blkaddr
, NIX_AF_CONST1
) >> 48) & 0xFF;
687 nix_hw
->lso
.total
= cfg
;
690 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_LSO_CFG
);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
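	/* Derived note: the low byte of 0xFFF2 is ~(FIN 0x01 | RST 0x04 |
	 * PSH 0x08), so first/middle TSO segments keep SYN/ACK/URG and drop
	 * PSH, RST and FIN as stated above.
	 */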
696 rvu_write64(rvu
, blkaddr
, NIX_AF_LSO_CFG
, cfg
| BIT_ULL(63));
698 /* Setup default static LSO formats
700 * Configure format fields for TCPv4 segmentation offload
702 idx
= NIX_LSO_FORMAT_IDX_TSOV4
;
703 nix_setup_lso_tso_l3(rvu
, blkaddr
, idx
, true, &fidx
);
704 nix_setup_lso_tso_l4(rvu
, blkaddr
, idx
, &fidx
);
706 /* Set rest of the fields to NOP */
707 for (; fidx
< 8; fidx
++) {
708 rvu_write64(rvu
, blkaddr
,
709 NIX_AF_LSO_FORMATX_FIELDX(idx
, fidx
), 0x0ULL
);
711 nix_hw
->lso
.in_use
++;
713 /* Configure format fields for TCPv6 segmentation offload */
714 idx
= NIX_LSO_FORMAT_IDX_TSOV6
;
716 nix_setup_lso_tso_l3(rvu
, blkaddr
, idx
, false, &fidx
);
717 nix_setup_lso_tso_l4(rvu
, blkaddr
, idx
, &fidx
);
719 /* Set rest of the fields to NOP */
720 for (; fidx
< 8; fidx
++) {
721 rvu_write64(rvu
, blkaddr
,
722 NIX_AF_LSO_FORMATX_FIELDX(idx
, fidx
), 0x0ULL
);
724 nix_hw
->lso
.in_use
++;
727 static void nix_ctx_free(struct rvu
*rvu
, struct rvu_pfvf
*pfvf
)
729 kfree(pfvf
->rq_bmap
);
730 kfree(pfvf
->sq_bmap
);
731 kfree(pfvf
->cq_bmap
);
733 qmem_free(rvu
->dev
, pfvf
->rq_ctx
);
735 qmem_free(rvu
->dev
, pfvf
->sq_ctx
);
737 qmem_free(rvu
->dev
, pfvf
->cq_ctx
);
739 qmem_free(rvu
->dev
, pfvf
->rss_ctx
);
740 if (pfvf
->nix_qints_ctx
)
741 qmem_free(rvu
->dev
, pfvf
->nix_qints_ctx
);
742 if (pfvf
->cq_ints_ctx
)
743 qmem_free(rvu
->dev
, pfvf
->cq_ints_ctx
);
745 pfvf
->rq_bmap
= NULL
;
746 pfvf
->cq_bmap
= NULL
;
747 pfvf
->sq_bmap
= NULL
;
751 pfvf
->rss_ctx
= NULL
;
752 pfvf
->nix_qints_ctx
= NULL
;
753 pfvf
->cq_ints_ctx
= NULL
;
756 static int nixlf_rss_ctx_init(struct rvu
*rvu
, int blkaddr
,
757 struct rvu_pfvf
*pfvf
, int nixlf
,
758 int rss_sz
, int rss_grps
, int hwctx_size
,
759 u64 way_mask
, bool tag_lsb_as_adder
)
761 int err
, grp
, num_indices
;
764 /* RSS is not requested for this NIXLF */
767 num_indices
= rss_sz
* rss_grps
;
769 /* Alloc NIX RSS HW context memory and config the base */
770 err
= qmem_alloc(rvu
->dev
, &pfvf
->rss_ctx
, num_indices
, hwctx_size
);
774 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RSS_BASE(nixlf
),
775 (u64
)pfvf
->rss_ctx
->iova
);
777 /* Config full RSS table size, enable RSS and caching */
778 val
= BIT_ULL(36) | BIT_ULL(4) | way_mask
<< 20 |
779 ilog2(num_indices
/ MAX_RSS_INDIR_TBL_SIZE
);
781 if (tag_lsb_as_adder
)
784 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RSS_CFG(nixlf
), val
);
785 /* Config RSS group offset and sizes */
786 for (grp
= 0; grp
< rss_grps
; grp
++)
787 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RSS_GRPX(nixlf
, grp
),
788 ((ilog2(rss_sz
) - 1) << 16) | (rss_sz
* grp
));
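	/* Example of the group config above (illustrative): with rss_sz = 256
	 * and grp = 1 the register gets offset 256 and size field
	 * ilog2(256) - 1 = 7, i.e. each group is a 256-entry slice of the
	 * RSS indirection table.
	 */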
792 static int nix_aq_enqueue_wait(struct rvu
*rvu
, struct rvu_block
*block
,
793 struct nix_aq_inst_s
*inst
)
795 struct admin_queue
*aq
= block
->aq
;
796 struct nix_aq_res_s
*result
;
800 result
= (struct nix_aq_res_s
*)aq
->res
->base
;
802 /* Get current head pointer where to append this instruction */
803 reg
= rvu_read64(rvu
, block
->addr
, NIX_AF_AQ_STATUS
);
804 head
= (reg
>> 4) & AQ_PTR_MASK
;
806 memcpy((void *)(aq
->inst
->base
+ (head
* aq
->inst
->entry_sz
)),
807 (void *)inst
, aq
->inst
->entry_sz
);
808 memset(result
, 0, sizeof(*result
));
809 /* sync into memory */
812 /* Ring the doorbell and wait for result */
813 rvu_write64(rvu
, block
->addr
, NIX_AF_AQ_DOOR
, 1);
814 while (result
->compcode
== NIX_AQ_COMP_NOTDONE
) {
822 if (result
->compcode
!= NIX_AQ_COMP_GOOD
)
823 /* TODO: Replace this with some error code */
829 static int rvu_nix_blk_aq_enq_inst(struct rvu
*rvu
, struct nix_hw
*nix_hw
,
830 struct nix_aq_enq_req
*req
,
831 struct nix_aq_enq_rsp
*rsp
)
833 struct rvu_hwinfo
*hw
= rvu
->hw
;
834 u16 pcifunc
= req
->hdr
.pcifunc
;
835 int nixlf
, blkaddr
, rc
= 0;
836 struct nix_aq_inst_s inst
;
837 struct rvu_block
*block
;
838 struct admin_queue
*aq
;
839 struct rvu_pfvf
*pfvf
;
844 blkaddr
= nix_hw
->blkaddr
;
845 block
= &hw
->block
[blkaddr
];
848 dev_warn(rvu
->dev
, "%s: NIX AQ not initialized\n", __func__
);
849 return NIX_AF_ERR_AQ_ENQUEUE
;
852 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
853 nixlf
= rvu_get_lf(rvu
, block
, pcifunc
, 0);
	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
858 if (!((!rsp
&& req
->ctype
== NIX_AQ_CTYPE_MCE
) ||
859 (req
->ctype
== NIX_AQ_CTYPE_BANDPROF
&& !pcifunc
))) {
860 if (!pfvf
->nixlf
|| nixlf
< 0)
861 return NIX_AF_ERR_AF_LF_INVALID
;
864 switch (req
->ctype
) {
865 case NIX_AQ_CTYPE_RQ
:
866 /* Check if index exceeds max no of queues */
867 if (!pfvf
->rq_ctx
|| req
->qidx
>= pfvf
->rq_ctx
->qsize
)
868 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
870 case NIX_AQ_CTYPE_SQ
:
871 if (!pfvf
->sq_ctx
|| req
->qidx
>= pfvf
->sq_ctx
->qsize
)
872 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
874 case NIX_AQ_CTYPE_CQ
:
875 if (!pfvf
->cq_ctx
|| req
->qidx
>= pfvf
->cq_ctx
->qsize
)
876 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
878 case NIX_AQ_CTYPE_RSS
:
879 /* Check if RSS is enabled and qidx is within range */
880 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_LFX_RSS_CFG(nixlf
));
881 if (!(cfg
& BIT_ULL(4)) || !pfvf
->rss_ctx
||
882 (req
->qidx
>= (256UL << (cfg
& 0xF))))
883 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
885 case NIX_AQ_CTYPE_MCE
:
886 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_RX_MCAST_CFG
);
888 /* Check if index exceeds MCE list length */
889 if (!nix_hw
->mcast
.mce_ctx
||
890 (req
->qidx
>= (256UL << (cfg
& 0xF))))
891 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
893 /* Adding multicast lists for requests from PF/VFs is not
894 * yet supported, so ignore this.
897 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
899 case NIX_AQ_CTYPE_BANDPROF
:
900 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req
*)req
,
902 rc
= NIX_AF_ERR_INVALID_BANDPROF
;
905 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
911 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
912 if (req
->ctype
== NIX_AQ_CTYPE_SQ
&&
913 ((req
->op
== NIX_AQ_INSTOP_INIT
&& req
->sq
.ena
) ||
914 (req
->op
== NIX_AQ_INSTOP_WRITE
&&
915 req
->sq_mask
.ena
&& req
->sq_mask
.smq
&& req
->sq
.ena
))) {
916 if (!is_valid_txschq(rvu
, blkaddr
, NIX_TXSCH_LVL_SMQ
,
917 pcifunc
, req
->sq
.smq
))
918 return NIX_AF_ERR_AQ_ENQUEUE
;
921 memset(&inst
, 0, sizeof(struct nix_aq_inst_s
));
923 inst
.cindex
= req
->qidx
;
924 inst
.ctype
= req
->ctype
;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
929 inst
.res_addr
= (u64
)aq
->res
->iova
;
931 /* Hardware uses same aq->res->base for updating result of
932 * previous instruction hence wait here till it is done.
934 spin_lock(&aq
->lock
);
936 /* Clean result + context memory */
937 memset(aq
->res
->base
, 0, aq
->res
->entry_sz
);
938 /* Context needs to be written at RES_ADDR + 128 */
939 ctx
= aq
->res
->base
+ 128;
940 /* Mask needs to be written at RES_ADDR + 256 */
941 mask
= aq
->res
->base
+ 256;
944 case NIX_AQ_INSTOP_WRITE
:
945 if (req
->ctype
== NIX_AQ_CTYPE_RQ
)
946 memcpy(mask
, &req
->rq_mask
,
947 sizeof(struct nix_rq_ctx_s
));
948 else if (req
->ctype
== NIX_AQ_CTYPE_SQ
)
949 memcpy(mask
, &req
->sq_mask
,
950 sizeof(struct nix_sq_ctx_s
));
951 else if (req
->ctype
== NIX_AQ_CTYPE_CQ
)
952 memcpy(mask
, &req
->cq_mask
,
953 sizeof(struct nix_cq_ctx_s
));
954 else if (req
->ctype
== NIX_AQ_CTYPE_RSS
)
955 memcpy(mask
, &req
->rss_mask
,
956 sizeof(struct nix_rsse_s
));
957 else if (req
->ctype
== NIX_AQ_CTYPE_MCE
)
958 memcpy(mask
, &req
->mce_mask
,
959 sizeof(struct nix_rx_mce_s
));
960 else if (req
->ctype
== NIX_AQ_CTYPE_BANDPROF
)
961 memcpy(mask
, &req
->prof_mask
,
962 sizeof(struct nix_bandprof_s
));
964 case NIX_AQ_INSTOP_INIT
:
965 if (req
->ctype
== NIX_AQ_CTYPE_RQ
)
966 memcpy(ctx
, &req
->rq
, sizeof(struct nix_rq_ctx_s
));
967 else if (req
->ctype
== NIX_AQ_CTYPE_SQ
)
968 memcpy(ctx
, &req
->sq
, sizeof(struct nix_sq_ctx_s
));
969 else if (req
->ctype
== NIX_AQ_CTYPE_CQ
)
970 memcpy(ctx
, &req
->cq
, sizeof(struct nix_cq_ctx_s
));
971 else if (req
->ctype
== NIX_AQ_CTYPE_RSS
)
972 memcpy(ctx
, &req
->rss
, sizeof(struct nix_rsse_s
));
973 else if (req
->ctype
== NIX_AQ_CTYPE_MCE
)
974 memcpy(ctx
, &req
->mce
, sizeof(struct nix_rx_mce_s
));
975 else if (req
->ctype
== NIX_AQ_CTYPE_BANDPROF
)
976 memcpy(ctx
, &req
->prof
, sizeof(struct nix_bandprof_s
));
978 case NIX_AQ_INSTOP_NOP
:
979 case NIX_AQ_INSTOP_READ
:
980 case NIX_AQ_INSTOP_LOCK
:
981 case NIX_AQ_INSTOP_UNLOCK
:
984 rc
= NIX_AF_ERR_AQ_ENQUEUE
;
985 spin_unlock(&aq
->lock
);
989 /* Submit the instruction to AQ */
990 rc
= nix_aq_enqueue_wait(rvu
, block
, &inst
);
992 spin_unlock(&aq
->lock
);
996 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
997 if (req
->op
== NIX_AQ_INSTOP_INIT
) {
998 if (req
->ctype
== NIX_AQ_CTYPE_RQ
&& req
->rq
.ena
)
999 __set_bit(req
->qidx
, pfvf
->rq_bmap
);
1000 if (req
->ctype
== NIX_AQ_CTYPE_SQ
&& req
->sq
.ena
)
1001 __set_bit(req
->qidx
, pfvf
->sq_bmap
);
1002 if (req
->ctype
== NIX_AQ_CTYPE_CQ
&& req
->cq
.ena
)
1003 __set_bit(req
->qidx
, pfvf
->cq_bmap
);
1006 if (req
->op
== NIX_AQ_INSTOP_WRITE
) {
1007 if (req
->ctype
== NIX_AQ_CTYPE_RQ
) {
1008 ena
= (req
->rq
.ena
& req
->rq_mask
.ena
) |
1009 (test_bit(req
->qidx
, pfvf
->rq_bmap
) &
1012 __set_bit(req
->qidx
, pfvf
->rq_bmap
);
1014 __clear_bit(req
->qidx
, pfvf
->rq_bmap
);
1016 if (req
->ctype
== NIX_AQ_CTYPE_SQ
) {
1017 ena
= (req
->rq
.ena
& req
->sq_mask
.ena
) |
1018 (test_bit(req
->qidx
, pfvf
->sq_bmap
) &
1021 __set_bit(req
->qidx
, pfvf
->sq_bmap
);
1023 __clear_bit(req
->qidx
, pfvf
->sq_bmap
);
1025 if (req
->ctype
== NIX_AQ_CTYPE_CQ
) {
1026 ena
= (req
->rq
.ena
& req
->cq_mask
.ena
) |
1027 (test_bit(req
->qidx
, pfvf
->cq_bmap
) &
1030 __set_bit(req
->qidx
, pfvf
->cq_bmap
);
1032 __clear_bit(req
->qidx
, pfvf
->cq_bmap
);
1037 /* Copy read context into mailbox */
1038 if (req
->op
== NIX_AQ_INSTOP_READ
) {
1039 if (req
->ctype
== NIX_AQ_CTYPE_RQ
)
1040 memcpy(&rsp
->rq
, ctx
,
1041 sizeof(struct nix_rq_ctx_s
));
1042 else if (req
->ctype
== NIX_AQ_CTYPE_SQ
)
1043 memcpy(&rsp
->sq
, ctx
,
1044 sizeof(struct nix_sq_ctx_s
));
1045 else if (req
->ctype
== NIX_AQ_CTYPE_CQ
)
1046 memcpy(&rsp
->cq
, ctx
,
1047 sizeof(struct nix_cq_ctx_s
));
1048 else if (req
->ctype
== NIX_AQ_CTYPE_RSS
)
1049 memcpy(&rsp
->rss
, ctx
,
1050 sizeof(struct nix_rsse_s
));
1051 else if (req
->ctype
== NIX_AQ_CTYPE_MCE
)
1052 memcpy(&rsp
->mce
, ctx
,
1053 sizeof(struct nix_rx_mce_s
));
1054 else if (req
->ctype
== NIX_AQ_CTYPE_BANDPROF
)
1055 memcpy(&rsp
->prof
, ctx
,
1056 sizeof(struct nix_bandprof_s
));
1060 spin_unlock(&aq
->lock
);
1064 static int rvu_nix_aq_enq_inst(struct rvu
*rvu
, struct nix_aq_enq_req
*req
,
1065 struct nix_aq_enq_rsp
*rsp
)
1067 struct nix_hw
*nix_hw
;
1070 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, req
->hdr
.pcifunc
);
1072 return NIX_AF_ERR_AF_LF_INVALID
;
1074 nix_hw
= get_nix_hw(rvu
->hw
, blkaddr
);
1076 return NIX_AF_ERR_INVALID_NIXBLK
;
1078 return rvu_nix_blk_aq_enq_inst(rvu
, nix_hw
, req
, rsp
);
1081 static const char *nix_get_ctx_name(int ctype
)
1084 case NIX_AQ_CTYPE_CQ
:
1086 case NIX_AQ_CTYPE_SQ
:
1088 case NIX_AQ_CTYPE_RQ
:
1090 case NIX_AQ_CTYPE_RSS
:
1096 static int nix_lf_hwctx_disable(struct rvu
*rvu
, struct hwctx_disable_req
*req
)
1098 struct rvu_pfvf
*pfvf
= rvu_get_pfvf(rvu
, req
->hdr
.pcifunc
);
1099 struct nix_aq_enq_req aq_req
;
1100 unsigned long *bmap
;
1101 int qidx
, q_cnt
= 0;
1104 if (!pfvf
->cq_ctx
|| !pfvf
->sq_ctx
|| !pfvf
->rq_ctx
)
1105 return NIX_AF_ERR_AQ_ENQUEUE
;
1107 memset(&aq_req
, 0, sizeof(struct nix_aq_enq_req
));
1108 aq_req
.hdr
.pcifunc
= req
->hdr
.pcifunc
;
1110 if (req
->ctype
== NIX_AQ_CTYPE_CQ
) {
1112 aq_req
.cq_mask
.ena
= 1;
1113 aq_req
.cq
.bp_ena
= 0;
1114 aq_req
.cq_mask
.bp_ena
= 1;
1115 q_cnt
= pfvf
->cq_ctx
->qsize
;
1116 bmap
= pfvf
->cq_bmap
;
1118 if (req
->ctype
== NIX_AQ_CTYPE_SQ
) {
1120 aq_req
.sq_mask
.ena
= 1;
1121 q_cnt
= pfvf
->sq_ctx
->qsize
;
1122 bmap
= pfvf
->sq_bmap
;
1124 if (req
->ctype
== NIX_AQ_CTYPE_RQ
) {
1126 aq_req
.rq_mask
.ena
= 1;
1127 q_cnt
= pfvf
->rq_ctx
->qsize
;
1128 bmap
= pfvf
->rq_bmap
;
1131 aq_req
.ctype
= req
->ctype
;
1132 aq_req
.op
= NIX_AQ_INSTOP_WRITE
;
1134 for (qidx
= 0; qidx
< q_cnt
; qidx
++) {
1135 if (!test_bit(qidx
, bmap
))
1138 rc
= rvu_nix_aq_enq_inst(rvu
, &aq_req
, NULL
);
1141 dev_err(rvu
->dev
, "Failed to disable %s:%d context\n",
1142 nix_get_ctx_name(req
->ctype
), qidx
);
1149 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1150 static int nix_lf_hwctx_lockdown(struct rvu
*rvu
, struct nix_aq_enq_req
*req
)
1152 struct nix_aq_enq_req lock_ctx_req
;
1155 if (req
->op
!= NIX_AQ_INSTOP_INIT
)
1158 if (req
->ctype
== NIX_AQ_CTYPE_MCE
||
1159 req
->ctype
== NIX_AQ_CTYPE_DYNO
)
1162 memset(&lock_ctx_req
, 0, sizeof(struct nix_aq_enq_req
));
1163 lock_ctx_req
.hdr
.pcifunc
= req
->hdr
.pcifunc
;
1164 lock_ctx_req
.ctype
= req
->ctype
;
1165 lock_ctx_req
.op
= NIX_AQ_INSTOP_LOCK
;
1166 lock_ctx_req
.qidx
= req
->qidx
;
1167 err
= rvu_nix_aq_enq_inst(rvu
, &lock_ctx_req
, NULL
);
1170 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1172 nix_get_ctx_name(req
->ctype
), req
->qidx
);
1176 int rvu_mbox_handler_nix_aq_enq(struct rvu
*rvu
,
1177 struct nix_aq_enq_req
*req
,
1178 struct nix_aq_enq_rsp
*rsp
)
1182 err
= rvu_nix_aq_enq_inst(rvu
, req
, rsp
);
1184 err
= nix_lf_hwctx_lockdown(rvu
, req
);
1189 int rvu_mbox_handler_nix_aq_enq(struct rvu
*rvu
,
1190 struct nix_aq_enq_req
*req
,
1191 struct nix_aq_enq_rsp
*rsp
)
1193 return rvu_nix_aq_enq_inst(rvu
, req
, rsp
);
1196 /* CN10K mbox handler */
1197 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu
*rvu
,
1198 struct nix_cn10k_aq_enq_req
*req
,
1199 struct nix_cn10k_aq_enq_rsp
*rsp
)
1201 return rvu_nix_aq_enq_inst(rvu
, (struct nix_aq_enq_req
*)req
,
1202 (struct nix_aq_enq_rsp
*)rsp
);
1205 int rvu_mbox_handler_nix_hwctx_disable(struct rvu
*rvu
,
1206 struct hwctx_disable_req
*req
,
1207 struct msg_rsp
*rsp
)
1209 return nix_lf_hwctx_disable(rvu
, req
);
1212 int rvu_mbox_handler_nix_lf_alloc(struct rvu
*rvu
,
1213 struct nix_lf_alloc_req
*req
,
1214 struct nix_lf_alloc_rsp
*rsp
)
1216 int nixlf
, qints
, hwctx_size
, intf
, err
, rc
= 0;
1217 struct rvu_hwinfo
*hw
= rvu
->hw
;
1218 u16 pcifunc
= req
->hdr
.pcifunc
;
1219 struct rvu_block
*block
;
1220 struct rvu_pfvf
*pfvf
;
1224 if (!req
->rq_cnt
|| !req
->sq_cnt
|| !req
->cq_cnt
)
1225 return NIX_AF_ERR_PARAM
;
1228 req
->way_mask
&= 0xFFFF;
1230 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
1231 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
1232 if (!pfvf
->nixlf
|| blkaddr
< 0)
1233 return NIX_AF_ERR_AF_LF_INVALID
;
1235 block
= &hw
->block
[blkaddr
];
1236 nixlf
= rvu_get_lf(rvu
, block
, pcifunc
, 0);
1238 return NIX_AF_ERR_AF_LF_INVALID
;
1240 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1241 if (req
->npa_func
) {
1242 /* If default, use 'this' NIXLF's PFFUNC */
1243 if (req
->npa_func
== RVU_DEFAULT_PF_FUNC
)
1244 req
->npa_func
= pcifunc
;
1245 if (!is_pffunc_map_valid(rvu
, req
->npa_func
, BLKTYPE_NPA
))
1246 return NIX_AF_INVAL_NPA_PF_FUNC
;
1249 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1250 if (req
->sso_func
) {
1251 /* If default, use 'this' NIXLF's PFFUNC */
1252 if (req
->sso_func
== RVU_DEFAULT_PF_FUNC
)
1253 req
->sso_func
= pcifunc
;
1254 if (!is_pffunc_map_valid(rvu
, req
->sso_func
, BLKTYPE_SSO
))
1255 return NIX_AF_INVAL_SSO_PF_FUNC
;
1258 /* If RSS is being enabled, check if requested config is valid.
1259 * RSS table size should be power of two, otherwise
1260 * RSS_GRP::OFFSET + adder might go beyond that group or
1261 * won't be able to use entire table.
1263 if (req
->rss_sz
&& (req
->rss_sz
> MAX_RSS_INDIR_TBL_SIZE
||
1264 !is_power_of_2(req
->rss_sz
)))
1265 return NIX_AF_ERR_RSS_SIZE_INVALID
;
1268 (!req
->rss_grps
|| req
->rss_grps
> MAX_RSS_GROUPS
))
1269 return NIX_AF_ERR_RSS_GRPS_INVALID
;
1271 /* Reset this NIX LF */
1272 err
= rvu_lf_reset(rvu
, block
, nixlf
);
1274 dev_err(rvu
->dev
, "Failed to reset NIX%d LF%d\n",
1275 block
->addr
- BLKADDR_NIX0
, nixlf
);
1276 return NIX_AF_ERR_LF_RESET
;
1279 ctx_cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST3
);
1281 /* Alloc NIX RQ HW context memory and config the base */
1282 hwctx_size
= 1UL << ((ctx_cfg
>> 4) & 0xF);
1283 err
= qmem_alloc(rvu
->dev
, &pfvf
->rq_ctx
, req
->rq_cnt
, hwctx_size
);
1287 pfvf
->rq_bmap
= kcalloc(req
->rq_cnt
, sizeof(long), GFP_KERNEL
);
1291 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RQS_BASE(nixlf
),
1292 (u64
)pfvf
->rq_ctx
->iova
);
1294 /* Set caching and queue count in HW */
1295 cfg
= BIT_ULL(36) | (req
->rq_cnt
- 1) | req
->way_mask
<< 20;
1296 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RQS_CFG(nixlf
), cfg
);
1298 /* Alloc NIX SQ HW context memory and config the base */
1299 hwctx_size
= 1UL << (ctx_cfg
& 0xF);
1300 err
= qmem_alloc(rvu
->dev
, &pfvf
->sq_ctx
, req
->sq_cnt
, hwctx_size
);
1304 pfvf
->sq_bmap
= kcalloc(req
->sq_cnt
, sizeof(long), GFP_KERNEL
);
1308 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_SQS_BASE(nixlf
),
1309 (u64
)pfvf
->sq_ctx
->iova
);
1311 cfg
= BIT_ULL(36) | (req
->sq_cnt
- 1) | req
->way_mask
<< 20;
1312 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_SQS_CFG(nixlf
), cfg
);
1314 /* Alloc NIX CQ HW context memory and config the base */
1315 hwctx_size
= 1UL << ((ctx_cfg
>> 8) & 0xF);
1316 err
= qmem_alloc(rvu
->dev
, &pfvf
->cq_ctx
, req
->cq_cnt
, hwctx_size
);
1320 pfvf
->cq_bmap
= kcalloc(req
->cq_cnt
, sizeof(long), GFP_KERNEL
);
1324 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_CQS_BASE(nixlf
),
1325 (u64
)pfvf
->cq_ctx
->iova
);
1327 cfg
= BIT_ULL(36) | (req
->cq_cnt
- 1) | req
->way_mask
<< 20;
1328 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_CQS_CFG(nixlf
), cfg
);
1330 /* Initialize receive side scaling (RSS) */
1331 hwctx_size
= 1UL << ((ctx_cfg
>> 12) & 0xF);
1332 err
= nixlf_rss_ctx_init(rvu
, blkaddr
, pfvf
, nixlf
, req
->rss_sz
,
1333 req
->rss_grps
, hwctx_size
, req
->way_mask
,
1334 !!(req
->flags
& NIX_LF_RSS_TAG_LSB_AS_ADDER
));
1338 /* Alloc memory for CQINT's HW contexts */
1339 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST2
);
1340 qints
= (cfg
>> 24) & 0xFFF;
1341 hwctx_size
= 1UL << ((ctx_cfg
>> 24) & 0xF);
1342 err
= qmem_alloc(rvu
->dev
, &pfvf
->cq_ints_ctx
, qints
, hwctx_size
);
1346 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_CINTS_BASE(nixlf
),
1347 (u64
)pfvf
->cq_ints_ctx
->iova
);
1349 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_CINTS_CFG(nixlf
),
1350 BIT_ULL(36) | req
->way_mask
<< 20);
1352 /* Alloc memory for QINT's HW contexts */
1353 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST2
);
1354 qints
= (cfg
>> 12) & 0xFFF;
1355 hwctx_size
= 1UL << ((ctx_cfg
>> 20) & 0xF);
1356 err
= qmem_alloc(rvu
->dev
, &pfvf
->nix_qints_ctx
, qints
, hwctx_size
);
1360 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_QINTS_BASE(nixlf
),
1361 (u64
)pfvf
->nix_qints_ctx
->iova
);
1362 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_QINTS_CFG(nixlf
),
1363 BIT_ULL(36) | req
->way_mask
<< 20);
1365 /* Setup VLANX TPID's.
1366 * Use VLAN1 for 802.1Q
1367 * and VLAN0 for 802.1AD.
1369 cfg
= (0x8100ULL
<< 16) | 0x88A8ULL
;
1370 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_TX_CFG(nixlf
), cfg
);
1372 /* Enable LMTST for this NIX LF */
1373 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_TX_CFG2(nixlf
), BIT_ULL(0));
1375 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1377 cfg
= req
->npa_func
;
1379 cfg
|= (u64
)req
->sso_func
<< 16;
1381 cfg
|= (u64
)req
->xqe_sz
<< 33;
1382 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_CFG(nixlf
), cfg
);
1384 /* Config Rx pkt length, csum checks and apad enable / disable */
1385 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RX_CFG(nixlf
), req
->rx_cfg
);
1387 /* Configure pkind for TX parse config */
1388 cfg
= NPC_TX_DEF_PKIND
;
1389 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_TX_PARSE_CFG(nixlf
), cfg
);
1391 intf
= is_afvf(pcifunc
) ? NIX_INTF_TYPE_LBK
: NIX_INTF_TYPE_CGX
;
1392 if (is_sdp_pfvf(pcifunc
))
1393 intf
= NIX_INTF_TYPE_SDP
;
1395 err
= nix_interface_init(rvu
, pcifunc
, intf
, nixlf
, rsp
,
1396 !!(req
->flags
& NIX_LF_LBK_BLK_SEL
));
1400 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1401 rvu_npc_disable_default_entries(rvu
, pcifunc
, nixlf
);
1403 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1404 rvu_write64(rvu
, blkaddr
,
1405 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf
, NIX_AF_LFX_RX_VTAG_TYPE7
),
1406 VTAGSIZE_T4
| VTAG_STRIP
);
1411 nix_ctx_free(rvu
, pfvf
);
1415 /* Set macaddr of this PF/VF */
1416 ether_addr_copy(rsp
->mac_addr
, pfvf
->mac_addr
);
1418 /* set SQB size info */
1419 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_SQ_CONST
);
1420 rsp
->sqb_size
= (cfg
>> 34) & 0xFFFF;
1421 rsp
->rx_chan_base
= pfvf
->rx_chan_base
;
1422 rsp
->tx_chan_base
= pfvf
->tx_chan_base
;
1423 rsp
->rx_chan_cnt
= pfvf
->rx_chan_cnt
;
1424 rsp
->tx_chan_cnt
= pfvf
->tx_chan_cnt
;
1425 rsp
->lso_tsov4_idx
= NIX_LSO_FORMAT_IDX_TSOV4
;
1426 rsp
->lso_tsov6_idx
= NIX_LSO_FORMAT_IDX_TSOV6
;
1427 /* Get HW supported stat count */
1428 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST1
);
1429 rsp
->lf_rx_stats
= ((cfg
>> 32) & 0xFF);
1430 rsp
->lf_tx_stats
= ((cfg
>> 24) & 0xFF);
1431 /* Get count of CQ IRQs and error IRQs supported per LF */
1432 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST2
);
1433 rsp
->qints
= ((cfg
>> 12) & 0xFFF);
1434 rsp
->cints
= ((cfg
>> 24) & 0xFFF);
1435 rsp
->cgx_links
= hw
->cgx_links
;
1436 rsp
->lbk_links
= hw
->lbk_links
;
1437 rsp
->sdp_links
= hw
->sdp_links
;
1442 int rvu_mbox_handler_nix_lf_free(struct rvu
*rvu
, struct nix_lf_free_req
*req
,
1443 struct msg_rsp
*rsp
)
1445 struct rvu_hwinfo
*hw
= rvu
->hw
;
1446 u16 pcifunc
= req
->hdr
.pcifunc
;
1447 struct rvu_block
*block
;
1448 int blkaddr
, nixlf
, err
;
1449 struct rvu_pfvf
*pfvf
;
1451 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
1452 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
1453 if (!pfvf
->nixlf
|| blkaddr
< 0)
1454 return NIX_AF_ERR_AF_LF_INVALID
;
1456 block
= &hw
->block
[blkaddr
];
1457 nixlf
= rvu_get_lf(rvu
, block
, pcifunc
, 0);
1459 return NIX_AF_ERR_AF_LF_INVALID
;
1461 if (req
->flags
& NIX_LF_DISABLE_FLOWS
)
1462 rvu_npc_disable_mcam_entries(rvu
, pcifunc
, nixlf
);
1464 rvu_npc_free_mcam_entries(rvu
, pcifunc
, nixlf
);
1466 /* Free any tx vtag def entries used by this NIX LF */
1467 if (!(req
->flags
& NIX_LF_DONT_FREE_TX_VTAG
))
1468 nix_free_tx_vtag_entries(rvu
, pcifunc
);
1470 nix_interface_deinit(rvu
, pcifunc
, nixlf
);
1472 /* Reset this NIX LF */
1473 err
= rvu_lf_reset(rvu
, block
, nixlf
);
1475 dev_err(rvu
->dev
, "Failed to reset NIX%d LF%d\n",
1476 block
->addr
- BLKADDR_NIX0
, nixlf
);
1477 return NIX_AF_ERR_LF_RESET
;
1480 nix_ctx_free(rvu
, pfvf
);
1485 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu
*rvu
,
1486 struct nix_mark_format_cfg
*req
,
1487 struct nix_mark_format_cfg_rsp
*rsp
)
1489 u16 pcifunc
= req
->hdr
.pcifunc
;
1490 struct nix_hw
*nix_hw
;
1491 struct rvu_pfvf
*pfvf
;
1495 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
1496 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
1497 if (!pfvf
->nixlf
|| blkaddr
< 0)
1498 return NIX_AF_ERR_AF_LF_INVALID
;
1500 nix_hw
= get_nix_hw(rvu
->hw
, blkaddr
);
1502 return NIX_AF_ERR_INVALID_NIXBLK
;
1504 cfg
= (((u32
)req
->offset
& 0x7) << 16) |
1505 (((u32
)req
->y_mask
& 0xF) << 12) |
1506 (((u32
)req
->y_val
& 0xF) << 8) |
1507 (((u32
)req
->r_mask
& 0xF) << 4) | ((u32
)req
->r_val
& 0xF);
1509 rc
= rvu_nix_reserve_mark_format(rvu
, nix_hw
, blkaddr
, cfg
);
1511 dev_err(rvu
->dev
, "No mark_format_ctl for (pf:%d, vf:%d)",
1512 rvu_get_pf(pcifunc
), pcifunc
& RVU_PFVF_FUNC_MASK
);
1513 return NIX_AF_ERR_MARK_CFG_FAIL
;
1516 rsp
->mark_format_idx
= rc
;
/* Handle shaper update specially for a few revisions */
1522 handle_txschq_shaper_update(struct rvu
*rvu
, int blkaddr
, int nixlf
,
1523 int lvl
, u64 reg
, u64 regval
)
1525 u64 regbase
, oldval
, sw_xoff
= 0;
1526 u64 dbgval
, md_debug0
= 0;
1527 unsigned long poll_tmo
;
1531 regbase
= reg
& 0xFFFF;
1532 schq
= TXSCHQ_IDX(reg
, TXSCHQ_IDX_SHIFT
);
1534 /* Check for rate register */
1536 case NIX_TXSCH_LVL_TL1
:
1537 md_debug0
= NIX_AF_TL1X_MD_DEBUG0(schq
);
1538 sw_xoff
= NIX_AF_TL1X_SW_XOFF(schq
);
1540 rate_reg
= !!(regbase
== NIX_AF_TL1X_CIR(0));
1542 case NIX_TXSCH_LVL_TL2
:
1543 md_debug0
= NIX_AF_TL2X_MD_DEBUG0(schq
);
1544 sw_xoff
= NIX_AF_TL2X_SW_XOFF(schq
);
1546 rate_reg
= (regbase
== NIX_AF_TL2X_CIR(0) ||
1547 regbase
== NIX_AF_TL2X_PIR(0));
1549 case NIX_TXSCH_LVL_TL3
:
1550 md_debug0
= NIX_AF_TL3X_MD_DEBUG0(schq
);
1551 sw_xoff
= NIX_AF_TL3X_SW_XOFF(schq
);
1553 rate_reg
= (regbase
== NIX_AF_TL3X_CIR(0) ||
1554 regbase
== NIX_AF_TL3X_PIR(0));
1556 case NIX_TXSCH_LVL_TL4
:
1557 md_debug0
= NIX_AF_TL4X_MD_DEBUG0(schq
);
1558 sw_xoff
= NIX_AF_TL4X_SW_XOFF(schq
);
1560 rate_reg
= (regbase
== NIX_AF_TL4X_CIR(0) ||
1561 regbase
== NIX_AF_TL4X_PIR(0));
1563 case NIX_TXSCH_LVL_MDQ
:
1564 sw_xoff
= NIX_AF_MDQX_SW_XOFF(schq
);
1565 rate_reg
= (regbase
== NIX_AF_MDQX_CIR(0) ||
1566 regbase
== NIX_AF_MDQX_PIR(0));
1573 /* Nothing special to do when state is not toggled */
1574 oldval
= rvu_read64(rvu
, blkaddr
, reg
);
1575 if ((oldval
& 0x1) == (regval
& 0x1)) {
1576 rvu_write64(rvu
, blkaddr
, reg
, regval
);
1580 /* PIR/CIR disable */
1581 if (!(regval
& 0x1)) {
1582 rvu_write64(rvu
, blkaddr
, sw_xoff
, 1);
1583 rvu_write64(rvu
, blkaddr
, reg
, 0);
1585 rvu_write64(rvu
, blkaddr
, sw_xoff
, 0);
1589 /* PIR/CIR enable */
1590 rvu_write64(rvu
, blkaddr
, sw_xoff
, 1);
1592 poll_tmo
= jiffies
+ usecs_to_jiffies(10000);
1593 /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1595 if (time_after(jiffies
, poll_tmo
)) {
1597 "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1602 dbgval
= rvu_read64(rvu
, blkaddr
, md_debug0
);
1603 } while (!(dbgval
& BIT_ULL(32)) && (dbgval
& BIT_ULL(48)));
1605 rvu_write64(rvu
, blkaddr
, reg
, regval
);
1607 rvu_write64(rvu
, blkaddr
, sw_xoff
, 0);
1611 /* Disable shaping of pkts by a scheduler queue
1612 * at a given scheduler level.
1614 static void nix_reset_tx_shaping(struct rvu
*rvu
, int blkaddr
,
1615 int nixlf
, int lvl
, int schq
)
1617 struct rvu_hwinfo
*hw
= rvu
->hw
;
1618 u64 cir_reg
= 0, pir_reg
= 0;
1622 case NIX_TXSCH_LVL_TL1
:
1623 cir_reg
= NIX_AF_TL1X_CIR(schq
);
1624 pir_reg
= 0; /* PIR not available at TL1 */
1626 case NIX_TXSCH_LVL_TL2
:
1627 cir_reg
= NIX_AF_TL2X_CIR(schq
);
1628 pir_reg
= NIX_AF_TL2X_PIR(schq
);
1630 case NIX_TXSCH_LVL_TL3
:
1631 cir_reg
= NIX_AF_TL3X_CIR(schq
);
1632 pir_reg
= NIX_AF_TL3X_PIR(schq
);
1634 case NIX_TXSCH_LVL_TL4
:
1635 cir_reg
= NIX_AF_TL4X_CIR(schq
);
1636 pir_reg
= NIX_AF_TL4X_PIR(schq
);
1638 case NIX_TXSCH_LVL_MDQ
:
1639 cir_reg
= NIX_AF_MDQX_CIR(schq
);
1640 pir_reg
= NIX_AF_MDQX_PIR(schq
);
1644 /* Shaper state toggle needs wait/poll */
1645 if (hw
->cap
.nix_shaper_toggle_wait
) {
1647 handle_txschq_shaper_update(rvu
, blkaddr
, nixlf
,
1650 handle_txschq_shaper_update(rvu
, blkaddr
, nixlf
,
1657 cfg
= rvu_read64(rvu
, blkaddr
, cir_reg
);
1658 rvu_write64(rvu
, blkaddr
, cir_reg
, cfg
& ~BIT_ULL(0));
1662 cfg
= rvu_read64(rvu
, blkaddr
, pir_reg
);
1663 rvu_write64(rvu
, blkaddr
, pir_reg
, cfg
& ~BIT_ULL(0));
1666 static void nix_reset_tx_linkcfg(struct rvu
*rvu
, int blkaddr
,
1669 struct rvu_hwinfo
*hw
= rvu
->hw
;
1673 if (lvl
>= hw
->cap
.nix_tx_aggr_lvl
)
1676 /* Reset TL4's SDP link config */
1677 if (lvl
== NIX_TXSCH_LVL_TL4
)
1678 rvu_write64(rvu
, blkaddr
, NIX_AF_TL4X_SDP_LINK_CFG(schq
), 0x00);
1680 link_level
= rvu_read64(rvu
, blkaddr
, NIX_AF_PSE_CHANNEL_LEVEL
) & 0x01 ?
1681 NIX_TXSCH_LVL_TL3
: NIX_TXSCH_LVL_TL2
;
1682 if (lvl
!= link_level
)
1685 /* Reset TL2's CGX or LBK link config */
1686 for (link
= 0; link
< (hw
->cgx_links
+ hw
->lbk_links
); link
++)
1687 rvu_write64(rvu
, blkaddr
,
1688 NIX_AF_TL3_TL2X_LINKX_CFG(schq
, link
), 0x00);
1691 static void nix_clear_tx_xoff(struct rvu
*rvu
, int blkaddr
,
1694 struct rvu_hwinfo
*hw
= rvu
->hw
;
1697 /* Skip this if shaping is not supported */
1698 if (!hw
->cap
.nix_shaping
)
1701 /* Clear level specific SW_XOFF */
1703 case NIX_TXSCH_LVL_TL1
:
1704 reg
= NIX_AF_TL1X_SW_XOFF(schq
);
1706 case NIX_TXSCH_LVL_TL2
:
1707 reg
= NIX_AF_TL2X_SW_XOFF(schq
);
1709 case NIX_TXSCH_LVL_TL3
:
1710 reg
= NIX_AF_TL3X_SW_XOFF(schq
);
1712 case NIX_TXSCH_LVL_TL4
:
1713 reg
= NIX_AF_TL4X_SW_XOFF(schq
);
1715 case NIX_TXSCH_LVL_MDQ
:
1716 reg
= NIX_AF_MDQX_SW_XOFF(schq
);
1722 rvu_write64(rvu
, blkaddr
, reg
, 0x0);
1725 static int nix_get_tx_link(struct rvu
*rvu
, u16 pcifunc
)
1727 struct rvu_hwinfo
*hw
= rvu
->hw
;
1728 int pf
= rvu_get_pf(pcifunc
);
1729 u8 cgx_id
= 0, lmac_id
= 0;
1731 if (is_afvf(pcifunc
)) {/* LBK links */
1732 return hw
->cgx_links
;
1733 } else if (is_pf_cgxmapped(rvu
, pf
)) {
1734 rvu_get_cgx_lmac_id(rvu
->pf2cgxlmac_map
[pf
], &cgx_id
, &lmac_id
);
1735 return (cgx_id
* hw
->lmac_per_cgx
) + lmac_id
;
1739 return hw
->cgx_links
+ hw
->lbk_links
;
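/* TX link numbering used above and in nix_get_txschq_range(): CGX LMACs
 * take links 0 .. (cgx_links * lmac_per_cgx - 1), so a CGX mapped PF gets
 * link (cgx_id * lmac_per_cgx + lmac_id); LBK links follow at
 * hw->cgx_links + lbkid and the SDP link comes after all CGX and LBK links.
 */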
1742 static void nix_get_txschq_range(struct rvu
*rvu
, u16 pcifunc
,
1743 int link
, int *start
, int *end
)
1745 struct rvu_hwinfo
*hw
= rvu
->hw
;
1746 int pf
= rvu_get_pf(pcifunc
);
1748 if (is_afvf(pcifunc
)) { /* LBK links */
1749 *start
= hw
->cap
.nix_txsch_per_cgx_lmac
* link
;
1750 *end
= *start
+ hw
->cap
.nix_txsch_per_lbk_lmac
;
1751 } else if (is_pf_cgxmapped(rvu
, pf
)) { /* CGX links */
1752 *start
= hw
->cap
.nix_txsch_per_cgx_lmac
* link
;
1753 *end
= *start
+ hw
->cap
.nix_txsch_per_cgx_lmac
;
1754 } else { /* SDP link */
1755 *start
= (hw
->cap
.nix_txsch_per_cgx_lmac
* hw
->cgx_links
) +
1756 (hw
->cap
.nix_txsch_per_lbk_lmac
* hw
->lbk_links
);
1757 *end
= *start
+ hw
->cap
.nix_txsch_per_sdp_lmac
;
1761 static int nix_check_txschq_alloc_req(struct rvu
*rvu
, int lvl
, u16 pcifunc
,
1762 struct nix_hw
*nix_hw
,
1763 struct nix_txsch_alloc_req
*req
)
1765 struct rvu_hwinfo
*hw
= rvu
->hw
;
1766 int schq
, req_schq
, free_cnt
;
1767 struct nix_txsch
*txsch
;
1768 int link
, start
, end
;
1770 txsch
= &nix_hw
->txsch
[lvl
];
1771 req_schq
= req
->schq_contig
[lvl
] + req
->schq
[lvl
];
1776 link
= nix_get_tx_link(rvu
, pcifunc
);
1778 /* For traffic aggregating scheduler level, one queue is enough */
1779 if (lvl
>= hw
->cap
.nix_tx_aggr_lvl
) {
1781 return NIX_AF_ERR_TLX_ALLOC_FAIL
;
	/* Get free SCHQ count and check if request can be accommodated */
1786 if (hw
->cap
.nix_fixed_txschq_mapping
) {
1787 nix_get_txschq_range(rvu
, pcifunc
, link
, &start
, &end
);
1788 schq
= start
+ (pcifunc
& RVU_PFVF_FUNC_MASK
);
1789 if (end
<= txsch
->schq
.max
&& schq
< end
&&
1790 !test_bit(schq
, txsch
->schq
.bmap
))
1795 free_cnt
= rvu_rsrc_free_count(&txsch
->schq
);
1798 if (free_cnt
< req_schq
|| req_schq
> MAX_TXSCHQ_PER_FUNC
)
1799 return NIX_AF_ERR_TLX_ALLOC_FAIL
;
1801 /* If contiguous queues are needed, check for availability */
1802 if (!hw
->cap
.nix_fixed_txschq_mapping
&& req
->schq_contig
[lvl
] &&
1803 !rvu_rsrc_check_contig(&txsch
->schq
, req
->schq_contig
[lvl
]))
1804 return NIX_AF_ERR_TLX_ALLOC_FAIL
;
1809 static void nix_txsch_alloc(struct rvu
*rvu
, struct nix_txsch
*txsch
,
1810 struct nix_txsch_alloc_rsp
*rsp
,
1811 int lvl
, int start
, int end
)
1813 struct rvu_hwinfo
*hw
= rvu
->hw
;
1814 u16 pcifunc
= rsp
->hdr
.pcifunc
;
	/* For traffic aggregating levels, queue alloc is based
	 * on the transmit link to which PF_FUNC is mapped.
	 */
1820 if (lvl
>= hw
->cap
.nix_tx_aggr_lvl
) {
1821 /* A single TL queue is allocated */
1822 if (rsp
->schq_contig
[lvl
]) {
1823 rsp
->schq_contig
[lvl
] = 1;
1824 rsp
->schq_contig_list
[lvl
][0] = start
;
	/* Both contig and non-contig reqs don't make sense here */
1828 if (rsp
->schq_contig
[lvl
])
1831 if (rsp
->schq
[lvl
]) {
1833 rsp
->schq_list
[lvl
][0] = start
;
1838 /* Adjust the queue request count if HW supports
1839 * only one queue per level configuration.
1841 if (hw
->cap
.nix_fixed_txschq_mapping
) {
1842 idx
= pcifunc
& RVU_PFVF_FUNC_MASK
;
1844 if (idx
>= (end
- start
) || test_bit(schq
, txsch
->schq
.bmap
)) {
1845 rsp
->schq_contig
[lvl
] = 0;
1850 if (rsp
->schq_contig
[lvl
]) {
1851 rsp
->schq_contig
[lvl
] = 1;
1852 set_bit(schq
, txsch
->schq
.bmap
);
1853 rsp
->schq_contig_list
[lvl
][0] = schq
;
1855 } else if (rsp
->schq
[lvl
]) {
1857 set_bit(schq
, txsch
->schq
.bmap
);
1858 rsp
->schq_list
[lvl
][0] = schq
;
	/* Allocate contiguous queue indices requested first */
1864 if (rsp
->schq_contig
[lvl
]) {
1865 schq
= bitmap_find_next_zero_area(txsch
->schq
.bmap
,
1866 txsch
->schq
.max
, start
,
1867 rsp
->schq_contig
[lvl
], 0);
1869 rsp
->schq_contig
[lvl
] = 0;
1870 for (idx
= 0; idx
< rsp
->schq_contig
[lvl
]; idx
++) {
1871 set_bit(schq
, txsch
->schq
.bmap
);
1872 rsp
->schq_contig_list
[lvl
][idx
] = schq
;
1877 /* Allocate non-contiguous queue indices */
1878 if (rsp
->schq
[lvl
]) {
1880 for (schq
= start
; schq
< end
; schq
++) {
1881 if (!test_bit(schq
, txsch
->schq
.bmap
)) {
1882 set_bit(schq
, txsch
->schq
.bmap
);
1883 rsp
->schq_list
[lvl
][idx
++] = schq
;
1885 if (idx
== rsp
->schq
[lvl
])
1888 /* Update how many were allocated */
1889 rsp
->schq
[lvl
] = idx
;
1893 int rvu_mbox_handler_nix_txsch_alloc(struct rvu
*rvu
,
1894 struct nix_txsch_alloc_req
*req
,
1895 struct nix_txsch_alloc_rsp
*rsp
)
1897 struct rvu_hwinfo
*hw
= rvu
->hw
;
1898 u16 pcifunc
= req
->hdr
.pcifunc
;
1899 int link
, blkaddr
, rc
= 0;
1900 int lvl
, idx
, start
, end
;
1901 struct nix_txsch
*txsch
;
1902 struct nix_hw
*nix_hw
;
1907 rc
= nix_get_nixlf(rvu
, pcifunc
, &nixlf
, &blkaddr
);
1911 nix_hw
= get_nix_hw(rvu
->hw
, blkaddr
);
1913 return NIX_AF_ERR_INVALID_NIXBLK
;
1915 mutex_lock(&rvu
->rsrc_lock
);
	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
1920 for (lvl
= 0; lvl
< NIX_TXSCH_LVL_CNT
; lvl
++) {
1921 rc
= nix_check_txschq_alloc_req(rvu
, lvl
, pcifunc
, nix_hw
, req
);
1926 /* Allocate requested Tx scheduler queues */
1927 for (lvl
= 0; lvl
< NIX_TXSCH_LVL_CNT
; lvl
++) {
1928 txsch
= &nix_hw
->txsch
[lvl
];
1929 pfvf_map
= txsch
->pfvf_map
;
1931 if (!req
->schq
[lvl
] && !req
->schq_contig
[lvl
])
1934 rsp
->schq
[lvl
] = req
->schq
[lvl
];
1935 rsp
->schq_contig
[lvl
] = req
->schq_contig
[lvl
];
1937 link
= nix_get_tx_link(rvu
, pcifunc
);
1939 if (lvl
>= hw
->cap
.nix_tx_aggr_lvl
) {
1942 } else if (hw
->cap
.nix_fixed_txschq_mapping
) {
1943 nix_get_txschq_range(rvu
, pcifunc
, link
, &start
, &end
);
1946 end
= txsch
->schq
.max
;
1949 nix_txsch_alloc(rvu
, txsch
, rsp
, lvl
, start
, end
);
1951 /* Reset queue config */
1952 for (idx
= 0; idx
< req
->schq_contig
[lvl
]; idx
++) {
1953 schq
= rsp
->schq_contig_list
[lvl
][idx
];
1954 if (!(TXSCH_MAP_FLAGS(pfvf_map
[schq
]) &
1955 NIX_TXSCHQ_CFG_DONE
))
1956 pfvf_map
[schq
] = TXSCH_MAP(pcifunc
, 0);
1957 nix_reset_tx_linkcfg(rvu
, blkaddr
, lvl
, schq
);
1958 nix_reset_tx_shaping(rvu
, blkaddr
, nixlf
, lvl
, schq
);
1961 for (idx
= 0; idx
< req
->schq
[lvl
]; idx
++) {
1962 schq
= rsp
->schq_list
[lvl
][idx
];
1963 if (!(TXSCH_MAP_FLAGS(pfvf_map
[schq
]) &
1964 NIX_TXSCHQ_CFG_DONE
))
1965 pfvf_map
[schq
] = TXSCH_MAP(pcifunc
, 0);
1966 nix_reset_tx_linkcfg(rvu
, blkaddr
, lvl
, schq
);
1967 nix_reset_tx_shaping(rvu
, blkaddr
, nixlf
, lvl
, schq
);
1971 rsp
->aggr_level
= hw
->cap
.nix_tx_aggr_lvl
;
1972 rsp
->aggr_lvl_rr_prio
= TXSCH_TL1_DFLT_RR_PRIO
;
1973 rsp
->link_cfg_lvl
= rvu_read64(rvu
, blkaddr
,
1974 NIX_AF_PSE_CHANNEL_LEVEL
) & 0x01 ?
1975 NIX_TXSCH_LVL_TL3
: NIX_TXSCH_LVL_TL2
;
1978 rc
= NIX_AF_ERR_TLX_ALLOC_FAIL
;
1980 mutex_unlock(&rvu
->rsrc_lock
);
1984 static int nix_smq_flush(struct rvu
*rvu
, int blkaddr
,
1985 int smq
, u16 pcifunc
, int nixlf
)
1987 int pf
= rvu_get_pf(pcifunc
);
1988 u8 cgx_id
= 0, lmac_id
= 0;
1989 int err
, restore_tx_en
= 0;
1992 /* enable cgx tx if disabled */
1993 if (is_pf_cgxmapped(rvu
, pf
)) {
1994 rvu_get_cgx_lmac_id(rvu
->pf2cgxlmac_map
[pf
], &cgx_id
, &lmac_id
);
1995 restore_tx_en
= !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id
, rvu
),
1999 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_SMQX_CFG(smq
));
2000 /* Do SMQ flush and set enqueue xoff */
2001 cfg
|= BIT_ULL(50) | BIT_ULL(49);
2002 rvu_write64(rvu
, blkaddr
, NIX_AF_SMQX_CFG(smq
), cfg
);
2004 /* Disable backpressure from physical link,
2005 * otherwise SMQ flush may stall.
2007 rvu_cgx_enadis_rx_bp(rvu
, pf
, false);
2009 /* Wait for flush to complete */
2010 err
= rvu_poll_reg(rvu
, blkaddr
,
2011 NIX_AF_SMQX_CFG(smq
), BIT_ULL(49), true);
2014 "NIXLF%d: SMQ%d flush failed\n", nixlf
, smq
);
2016 rvu_cgx_enadis_rx_bp(rvu
, pf
, true);
2017 /* restore cgx tx state */
2019 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id
, rvu
), lmac_id
, false);
2023 static int nix_txschq_free(struct rvu
*rvu
, u16 pcifunc
)
2025 int blkaddr
, nixlf
, lvl
, schq
, err
;
2026 struct rvu_hwinfo
*hw
= rvu
->hw
;
2027 struct nix_txsch
*txsch
;
2028 struct nix_hw
*nix_hw
;
2031 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
2033 return NIX_AF_ERR_AF_LF_INVALID
;
2035 nix_hw
= get_nix_hw(rvu
->hw
, blkaddr
);
2037 return NIX_AF_ERR_INVALID_NIXBLK
;
2039 nixlf
= rvu_get_lf(rvu
, &hw
->block
[blkaddr
], pcifunc
, 0);
2041 return NIX_AF_ERR_AF_LF_INVALID
;
	/* Disable TL2/3 queue links and all XOFF's before SMQ flush */
2044 mutex_lock(&rvu
->rsrc_lock
);
2045 for (lvl
= NIX_TXSCH_LVL_MDQ
; lvl
< NIX_TXSCH_LVL_CNT
; lvl
++) {
2046 txsch
= &nix_hw
->txsch
[lvl
];
2048 if (lvl
>= hw
->cap
.nix_tx_aggr_lvl
)
2051 for (schq
= 0; schq
< txsch
->schq
.max
; schq
++) {
2052 if (TXSCH_MAP_FUNC(txsch
->pfvf_map
[schq
]) != pcifunc
)
2054 nix_reset_tx_linkcfg(rvu
, blkaddr
, lvl
, schq
);
2055 nix_clear_tx_xoff(rvu
, blkaddr
, lvl
, schq
);
2058 nix_clear_tx_xoff(rvu
, blkaddr
, NIX_TXSCH_LVL_TL1
,
2059 nix_get_tx_link(rvu
, pcifunc
));
2061 /* On PF cleanup, clear cfg done flag as
2062 * PF would have changed default config.
2064 if (!(pcifunc
& RVU_PFVF_FUNC_MASK
)) {
2065 txsch
= &nix_hw
->txsch
[NIX_TXSCH_LVL_TL1
];
2066 schq
= nix_get_tx_link(rvu
, pcifunc
);
2067 /* Do not clear pcifunc in txsch->pfvf_map[schq] because
2068 * VF might be using this TL1 queue
2070 map_func
= TXSCH_MAP_FUNC(txsch
->pfvf_map
[schq
]);
2071 txsch
->pfvf_map
[schq
] = TXSCH_SET_FLAG(map_func
, 0x0);
2075 txsch
= &nix_hw
->txsch
[NIX_TXSCH_LVL_SMQ
];
2076 for (schq
= 0; schq
< txsch
->schq
.max
; schq
++) {
2077 if (TXSCH_MAP_FUNC(txsch
->pfvf_map
[schq
]) != pcifunc
)
2079 nix_smq_flush(rvu
, blkaddr
, schq
, pcifunc
, nixlf
);
2082 /* Now free scheduler queues to free pool */
2083 for (lvl
= 0; lvl
< NIX_TXSCH_LVL_CNT
; lvl
++) {
		/* TLs above aggregation level are shared across all PF
		 * and its VFs, hence skip freeing them.
		 */
2087 if (lvl
>= hw
->cap
.nix_tx_aggr_lvl
)
2090 txsch
= &nix_hw
->txsch
[lvl
];
2091 for (schq
= 0; schq
< txsch
->schq
.max
; schq
++) {
2092 if (TXSCH_MAP_FUNC(txsch
->pfvf_map
[schq
]) != pcifunc
)
2094 rvu_free_rsrc(&txsch
->schq
, schq
);
2095 txsch
->pfvf_map
[schq
] = TXSCH_MAP(0, NIX_TXSCHQ_FREE
);
2098 mutex_unlock(&rvu
->rsrc_lock
);
2100 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2101 rvu_write64(rvu
, blkaddr
, NIX_AF_NDC_TX_SYNC
, BIT_ULL(12) | nixlf
);
2102 err
= rvu_poll_reg(rvu
, blkaddr
, NIX_AF_NDC_TX_SYNC
, BIT_ULL(12), true);
2104 dev_err(rvu
->dev
, "NDC-TX sync failed for NIXLF %d\n", nixlf
);
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	int rc;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		rc = NIX_AF_ERR_TLX_INVALID;
		goto err;
	}

	/* Clear SW_XOFF of this resource only.
	 * For SMQ level, all path XOFF's
	 * need to be made clear by user
	 */
	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);

	/* Flush if it is a SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on user
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ &&
	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
		rc = NIX_AF_SMQ_FLUSH_FAILED;
		goto err;
	}

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);

	return nix_txschq_free_one(rvu, req);
}
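
/* Note on pfvf_map encoding (a summary of how the existing macros are used
 * here, not new state): each txsch->pfvf_map[] entry packs the owning
 * PF_FUNC together with flag bits via TXSCH_MAP(pcifunc, flags).
 * TXSCH_MAP_FUNC()/TXSCH_MAP_FLAGS() extract the two halves and
 * TXSCH_SET_FLAG() rewrites only the flags, e.g. NIX_TXSCHQ_FREE for an
 * unowned queue or NIX_TXSCHQ_CFG_DONE once the PF has written its default
 * TL1 config.
 */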
static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}
static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring is not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_MDQ:
		if (regbase == NIX_AF_MDQX_CIR(0) ||
		    regbase == NIX_AF_MDQX_PIR(0))
			return false;
		break;
	}
	return true;
}
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));

	/* On OcteonTx2 the config was in bytes and on newer silicons
	 * it's changed to weight.
	 */
	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    TXSCH_TL1_DFLT_RR_QTM);
	else
		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
			    CN10K_MAX_DWRR_WEIGHT);

	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}

/* Register offset - [15:0]
 * Scheduler Queue number - [25:16]
 */
#define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
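
/* Illustrative example (assuming TXSCHQ_IDX_SHIFT is 16, per the layout
 * described above): a mailbox request addressing scheduler queue 5 would
 * carry reg = (5 << 16) | <register base>; TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT)
 * recovers the queue number while the low 16 bits select the per-queue CSR.
 */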
static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
			       int blkaddr, struct nix_txschq_config *req,
			       struct nix_txschq_config *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int idx, schq;
	u64 reg;

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
			return NIX_AF_INVAL_TXSCHQ_CFG;
		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
	}
	rsp->lvl = req->lvl;
	rsp->num_regs = req->num_regs;
	return 0;
}
static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
			       u16 pcifunc, struct nix_txsch *txsch)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int lbk_link_start, lbk_links;
	u8 pf = rvu_get_pf(pcifunc);
	int schq;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	lbk_link_start = hw->cgx_links;

	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		/* Enable all LBK links with channel 63 by default so that
		 * packets can be sent to LBK with a NPC TX MCAM rule
		 */
		lbk_links = hw->lbk_links;
		while (lbk_links--)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							      lbk_link_start +
							      lbk_links),
				    BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
	}
}
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct nix_txschq_config *rsp)
{
	u64 reg, val, regval, schq_regbase, val_mask;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	if (req->read)
		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		reg &= NIX_TX_SCHQ_MASK;
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;
		val_mask = req->regval_mask[idx];

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		val = rvu_read64(rvu, blkaddr, reg);
		regval = (val & val_mask) | (regval & ~val_mask);

		/* Handle shaping state toggle specially */
		if (hw->cap.nix_shaper_toggle_wait &&
		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
						req->lvl, reg, regval))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special hence split register writes such
		 * that flush first and write rest of the bits later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

exit:
	rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
			   &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
	return 0;
}
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
	    req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	/* RX VTAG Type 7 is reserved for vf vlan */
	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		return NIX_AF_ERR_RX_VTAG_INUSE;

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}
static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
			    u16 pcifunc, int index)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (vlan->entry2pfvf_map[index] != pcifunc)
		return NIX_AF_ERR_PARAM;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);

	vlan->entry2pfvf_map[index] = 0;
	rvu_free_rsrc(&vlan->rsrc, index);

	return 0;
}
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);
	/* Scan all the entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < vlan->rsrc.max; index++) {
		if (vlan->entry2pfvf_map[index] == pcifunc)
			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
	}
	mutex_unlock(&vlan->rsrc_lock);
}
static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
			     u64 vtag, u8 size)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	u64 regval;
	int index;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);

	index = rvu_alloc_rsrc(&vlan->rsrc);
	if (index < 0) {
		mutex_unlock(&vlan->rsrc_lock);
		return index;
	}

	mutex_unlock(&vlan->rsrc_lock);

	regval = size ? vtag : vtag << 32;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);

	return index;
}
static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
			     struct nix_vtag_config *req)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	u16 pcifunc = req->hdr.pcifunc;
	int idx0 = req->tx.vtag0_idx;
	int idx1 = req->tx.vtag1_idx;
	struct nix_txvlan *vlan;
	int err = 0;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (req->tx.free_vtag0 && req->tx.free_vtag1)
		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
		    vlan->entry2pfvf_map[idx1] != pcifunc)
			return NIX_AF_ERR_PARAM;

	mutex_lock(&vlan->rsrc_lock);

	if (req->tx.free_vtag0) {
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
		if (err)
			goto exit;
	}

	if (req->tx.free_vtag1)
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);

exit:
	mutex_unlock(&vlan->rsrc_lock);
	return err;
}
static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
			   struct nix_vtag_config *req,
			   struct nix_vtag_config_rsp *rsp)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan;
	u16 pcifunc = req->hdr.pcifunc;

	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	vlan = &nix_hw->txvlan;
	if (req->tx.cfg_vtag0) {
		rsp->vtag0_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag0, req->vtag_size);

		if (rsp->vtag0_idx < 0)
			return NIX_AF_ERR_TX_VTAG_NOSPC;

		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
	}

	if (req->tx.cfg_vtag1) {
		rsp->vtag1_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag1, req->vtag_size);

		if (rsp->vtag1_idx < 0)
			goto err_free;

		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
	}

	return 0;

err_free:
	if (req->tx.cfg_vtag0)
		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);

	return NIX_AF_ERR_TX_VTAG_NOSPC;
}
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct nix_vtag_config_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	if (req->cfg_type) {
		/* rx vtag configuration */
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
		if (err)
			return NIX_AF_ERR_PARAM;
	} else {
		/* tx vtag configuration */
		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
		    (req->tx.free_vtag0 || req->tx.free_vtag1))
			return NIX_AF_ERR_PARAM;

		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);

		if (req->tx.free_vtag0 || req->tx.free_vtag1)
			return nix_tx_vtag_decfg(rvu, blkaddr, req);
	}

	return 0;
}
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
			     int mce, u8 op, u16 pcifunc, int next, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Use RSS with RSS index 0 */
	aq_req.mce.op = 1;
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}
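
/* MCE entries form a linked chain in the multicast/mirror context table:
 * each entry carries the target pf_func plus the index of the 'next' entry,
 * and 'eol' marks the last entry of a replication list. The list helpers
 * below maintain the software hlist and replay it to HW via this function.
 */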
2697 static int nix_update_mce_list_entry(struct nix_mce_list
*mce_list
,
2698 u16 pcifunc
, bool add
)
2700 struct mce
*mce
, *tail
= NULL
;
2701 bool delete = false;
2703 /* Scan through the current list */
2704 hlist_for_each_entry(mce
, &mce_list
->head
, node
) {
2705 /* If already exists, then delete */
2706 if (mce
->pcifunc
== pcifunc
&& !add
) {
2709 } else if (mce
->pcifunc
== pcifunc
&& add
) {
2710 /* entry already exists */
2717 hlist_del(&mce
->node
);
2726 /* Add a new one to the list, at the tail */
2727 mce
= kzalloc(sizeof(*mce
), GFP_KERNEL
);
2730 mce
->pcifunc
= pcifunc
;
2732 hlist_add_head(&mce
->node
, &mce_list
->head
);
2734 hlist_add_behind(&mce
->node
, &tail
->node
);
2739 int nix_update_mce_list(struct rvu
*rvu
, u16 pcifunc
,
2740 struct nix_mce_list
*mce_list
,
2741 int mce_idx
, int mcam_index
, bool add
)
2743 int err
= 0, idx
, next_idx
, last_idx
, blkaddr
, npc_blkaddr
;
2744 struct npc_mcam
*mcam
= &rvu
->hw
->mcam
;
2745 struct nix_mcast
*mcast
;
2746 struct nix_hw
*nix_hw
;
2752 /* Get this PF/VF func's MCE index */
2753 idx
= mce_idx
+ (pcifunc
& RVU_PFVF_FUNC_MASK
);
2755 if (idx
> (mce_idx
+ mce_list
->max
)) {
2757 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2758 __func__
, idx
, mce_list
->max
,
2759 pcifunc
>> RVU_PFVF_PF_SHIFT
);
2763 err
= nix_get_struct_ptrs(rvu
, pcifunc
, &nix_hw
, &blkaddr
);
2767 mcast
= &nix_hw
->mcast
;
2768 mutex_lock(&mcast
->mce_lock
);
2770 err
= nix_update_mce_list_entry(mce_list
, pcifunc
, add
);
2774 /* Disable MCAM entry in NPC */
2775 if (!mce_list
->count
) {
2776 npc_blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NPC
, 0);
2777 npc_enable_mcam_entry(rvu
, mcam
, npc_blkaddr
, mcam_index
, false);
2781 /* Dump the updated list to HW */
2783 last_idx
= idx
+ mce_list
->count
- 1;
2784 hlist_for_each_entry(mce
, &mce_list
->head
, node
) {
2789 /* EOL should be set in last MCE */
2790 err
= nix_blk_setup_mce(rvu
, nix_hw
, idx
, NIX_AQ_INSTOP_WRITE
,
2791 mce
->pcifunc
, next_idx
,
2792 (next_idx
> last_idx
) ? true : false);
2799 mutex_unlock(&mcast
->mce_lock
);
void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
		      struct nix_mce_list **mce_list, int *mce_idx)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_pfvf *pfvf;

	if (!hw->cap.nix_rx_multicast ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
		*mce_list = NULL;
		*mce_idx = 0;
		return;
	}

	/* Get this PF/VF func's MCE index */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	if (type == NIXLF_BCAST_ENTRY) {
		*mce_list = &pfvf->bcast_mce_list;
		*mce_idx = pfvf->bcast_mce_idx;
	} else if (type == NIXLF_ALLMULTI_ENTRY) {
		*mce_list = &pfvf->mcast_mce_list;
		*mce_idx = pfvf->mcast_mce_idx;
	} else if (type == NIXLF_PROMISC_ENTRY) {
		*mce_list = &pfvf->promisc_mce_list;
		*mce_idx = pfvf->promisc_mce_idx;
	}
}
2834 static int nix_update_mce_rule(struct rvu
*rvu
, u16 pcifunc
,
2837 int err
= 0, nixlf
, blkaddr
, mcam_index
, mce_idx
;
2838 struct npc_mcam
*mcam
= &rvu
->hw
->mcam
;
2839 struct rvu_hwinfo
*hw
= rvu
->hw
;
2840 struct nix_mce_list
*mce_list
;
2843 /* skip multicast pkt replication for AF's VFs & SDP links */
2844 if (is_afvf(pcifunc
) || is_sdp_pfvf(pcifunc
))
2847 if (!hw
->cap
.nix_rx_multicast
)
2850 pf
= rvu_get_pf(pcifunc
);
2851 if (!is_pf_cgxmapped(rvu
, pf
))
2854 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
2858 nixlf
= rvu_get_lf(rvu
, &hw
->block
[blkaddr
], pcifunc
, 0);
2862 nix_get_mce_list(rvu
, pcifunc
, type
, &mce_list
, &mce_idx
);
2864 mcam_index
= npc_get_nixlf_mcam_index(mcam
,
2865 pcifunc
& ~RVU_PFVF_FUNC_MASK
,
2867 err
= nix_update_mce_list(rvu
, pcifunc
, mce_list
,
2868 mce_idx
, mcam_index
, add
);
2872 static int nix_setup_mce_tables(struct rvu
*rvu
, struct nix_hw
*nix_hw
)
2874 struct nix_mcast
*mcast
= &nix_hw
->mcast
;
2875 int err
, pf
, numvfs
, idx
;
2876 struct rvu_pfvf
*pfvf
;
2880 /* Skip PF0 (i.e AF) */
2881 for (pf
= 1; pf
< (rvu
->cgx_mapped_pfs
+ 1); pf
++) {
2882 cfg
= rvu_read64(rvu
, BLKADDR_RVUM
, RVU_PRIV_PFX_CFG(pf
));
2883 /* If PF is not enabled, nothing to do */
2884 if (!((cfg
>> 20) & 0x01))
2886 /* Get numVFs attached to this PF */
2887 numvfs
= (cfg
>> 12) & 0xFF;
2889 pfvf
= &rvu
->pf
[pf
];
2891 /* This NIX0/1 block mapped to PF ? */
2892 if (pfvf
->nix_blkaddr
!= nix_hw
->blkaddr
)
2895 /* save start idx of broadcast mce list */
2896 pfvf
->bcast_mce_idx
= nix_alloc_mce_list(mcast
, numvfs
+ 1);
2897 nix_mce_list_init(&pfvf
->bcast_mce_list
, numvfs
+ 1);
2899 /* save start idx of multicast mce list */
2900 pfvf
->mcast_mce_idx
= nix_alloc_mce_list(mcast
, numvfs
+ 1);
2901 nix_mce_list_init(&pfvf
->mcast_mce_list
, numvfs
+ 1);
2903 /* save the start idx of promisc mce list */
2904 pfvf
->promisc_mce_idx
= nix_alloc_mce_list(mcast
, numvfs
+ 1);
2905 nix_mce_list_init(&pfvf
->promisc_mce_list
, numvfs
+ 1);
2907 for (idx
= 0; idx
< (numvfs
+ 1); idx
++) {
2908 /* idx-0 is for PF, followed by VFs */
2909 pcifunc
= (pf
<< RVU_PFVF_PF_SHIFT
);
2911 /* Add dummy entries now, so that we don't have to check
2912 * for whether AQ_OP should be INIT/WRITE later on.
2913 * Will be updated when a NIXLF is attached/detached to
2916 err
= nix_blk_setup_mce(rvu
, nix_hw
,
2917 pfvf
->bcast_mce_idx
+ idx
,
2923 /* add dummy entries to multicast mce list */
2924 err
= nix_blk_setup_mce(rvu
, nix_hw
,
2925 pfvf
->mcast_mce_idx
+ idx
,
2931 /* add dummy entries to promisc mce list */
2932 err
= nix_blk_setup_mce(rvu
, nix_hw
,
2933 pfvf
->promisc_mce_idx
+ idx
,
2943 static int nix_setup_mcast(struct rvu
*rvu
, struct nix_hw
*nix_hw
, int blkaddr
)
2945 struct nix_mcast
*mcast
= &nix_hw
->mcast
;
2946 struct rvu_hwinfo
*hw
= rvu
->hw
;
2949 size
= (rvu_read64(rvu
, blkaddr
, NIX_AF_CONST3
) >> 16) & 0x0F;
2950 size
= (1ULL << size
);
2952 /* Alloc memory for multicast/mirror replication entries */
2953 err
= qmem_alloc(rvu
->dev
, &mcast
->mce_ctx
,
2954 (256UL << MC_TBL_SIZE
), size
);
2958 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_MCAST_BASE
,
2959 (u64
)mcast
->mce_ctx
->iova
);
2961 /* Set max list length equal to max no of VFs per PF + PF itself */
2962 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_MCAST_CFG
,
2963 BIT_ULL(36) | (hw
->max_vfs_per_pf
<< 4) | MC_TBL_SIZE
);
2965 /* Alloc memory for multicast replication buffers */
2966 size
= rvu_read64(rvu
, blkaddr
, NIX_AF_MC_MIRROR_CONST
) & 0xFFFF;
2967 err
= qmem_alloc(rvu
->dev
, &mcast
->mcast_buf
,
2968 (8UL << MC_BUF_CNT
), size
);
2972 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_MCAST_BUF_BASE
,
2973 (u64
)mcast
->mcast_buf
->iova
);
2975 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2976 mcast
->replay_pkind
= rvu_alloc_rsrc(&hw
->pkind
.rsrc
);
2978 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_MCAST_BUF_CFG
,
2979 BIT_ULL(63) | (mcast
->replay_pkind
<< 24) |
2980 BIT_ULL(20) | MC_BUF_CNT
);
2982 mutex_init(&mcast
->mce_lock
);
2984 return nix_setup_mce_tables(rvu
, nix_hw
);
2987 static int nix_setup_txvlan(struct rvu
*rvu
, struct nix_hw
*nix_hw
)
2989 struct nix_txvlan
*vlan
= &nix_hw
->txvlan
;
2992 /* Allocate resource bimap for tx vtag def registers*/
2993 vlan
->rsrc
.max
= NIX_TX_VTAG_DEF_MAX
;
2994 err
= rvu_alloc_bitmap(&vlan
->rsrc
);
2998 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2999 vlan
->entry2pfvf_map
= devm_kcalloc(rvu
->dev
, vlan
->rsrc
.max
,
3000 sizeof(u16
), GFP_KERNEL
);
3001 if (!vlan
->entry2pfvf_map
)
3004 mutex_init(&vlan
->rsrc_lock
);
3008 kfree(vlan
->rsrc
.bmap
);
3012 static int nix_setup_txschq(struct rvu
*rvu
, struct nix_hw
*nix_hw
, int blkaddr
)
3014 struct nix_txsch
*txsch
;
3018 /* Get scheduler queue count of each type and alloc
3019 * bitmap for each for alloc/free/attach operations.
3021 for (lvl
= 0; lvl
< NIX_TXSCH_LVL_CNT
; lvl
++) {
3022 txsch
= &nix_hw
->txsch
[lvl
];
3025 case NIX_TXSCH_LVL_SMQ
:
3026 reg
= NIX_AF_MDQ_CONST
;
3028 case NIX_TXSCH_LVL_TL4
:
3029 reg
= NIX_AF_TL4_CONST
;
3031 case NIX_TXSCH_LVL_TL3
:
3032 reg
= NIX_AF_TL3_CONST
;
3034 case NIX_TXSCH_LVL_TL2
:
3035 reg
= NIX_AF_TL2_CONST
;
3037 case NIX_TXSCH_LVL_TL1
:
3038 reg
= NIX_AF_TL1_CONST
;
3041 cfg
= rvu_read64(rvu
, blkaddr
, reg
);
3042 txsch
->schq
.max
= cfg
& 0xFFFF;
3043 err
= rvu_alloc_bitmap(&txsch
->schq
);
3047 /* Allocate memory for scheduler queues to
3048 * PF/VF pcifunc mapping info.
3050 txsch
->pfvf_map
= devm_kcalloc(rvu
->dev
, txsch
->schq
.max
,
3051 sizeof(u32
), GFP_KERNEL
);
3052 if (!txsch
->pfvf_map
)
3054 for (schq
= 0; schq
< txsch
->schq
.max
; schq
++)
3055 txsch
->pfvf_map
[schq
] = TXSCH_MAP(0, NIX_TXSCHQ_FREE
);
3058 /* Setup a default value of 8192 as DWRR MTU */
3059 if (rvu
->hw
->cap
.nix_common_dwrr_mtu
) {
3060 rvu_write64(rvu
, blkaddr
, NIX_AF_DWRR_RPM_MTU
,
3061 convert_bytes_to_dwrr_mtu(8192));
3062 rvu_write64(rvu
, blkaddr
, NIX_AF_DWRR_SDP_MTU
,
3063 convert_bytes_to_dwrr_mtu(8192));
3069 int rvu_nix_reserve_mark_format(struct rvu
*rvu
, struct nix_hw
*nix_hw
,
3070 int blkaddr
, u32 cfg
)
3074 for (fmt_idx
= 0; fmt_idx
< nix_hw
->mark_format
.in_use
; fmt_idx
++) {
3075 if (nix_hw
->mark_format
.cfg
[fmt_idx
] == cfg
)
3078 if (fmt_idx
>= nix_hw
->mark_format
.total
)
3081 rvu_write64(rvu
, blkaddr
, NIX_AF_MARK_FORMATX_CTL(fmt_idx
), cfg
);
3082 nix_hw
->mark_format
.cfg
[fmt_idx
] = cfg
;
3083 nix_hw
->mark_format
.in_use
++;
3087 static int nix_af_mark_format_setup(struct rvu
*rvu
, struct nix_hw
*nix_hw
,
3091 [NIX_MARK_CFG_IP_DSCP_RED
] = 0x10003,
3092 [NIX_MARK_CFG_IP_DSCP_YELLOW
] = 0x11200,
3093 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED
] = 0x11203,
3094 [NIX_MARK_CFG_IP_ECN_RED
] = 0x6000c,
3095 [NIX_MARK_CFG_IP_ECN_YELLOW
] = 0x60c00,
3096 [NIX_MARK_CFG_IP_ECN_YELLOW_RED
] = 0x60c0c,
3097 [NIX_MARK_CFG_VLAN_DEI_RED
] = 0x30008,
3098 [NIX_MARK_CFG_VLAN_DEI_YELLOW
] = 0x30800,
3099 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED
] = 0x30808,
3104 total
= (rvu_read64(rvu
, blkaddr
, NIX_AF_PSE_CONST
) & 0xFF00) >> 8;
3105 nix_hw
->mark_format
.total
= (u8
)total
;
3106 nix_hw
->mark_format
.cfg
= devm_kcalloc(rvu
->dev
, total
, sizeof(u32
),
3108 if (!nix_hw
->mark_format
.cfg
)
3110 for (i
= 0; i
< NIX_MARK_CFG_MAX
; i
++) {
3111 rc
= rvu_nix_reserve_mark_format(rvu
, nix_hw
, blkaddr
, cfgs
[i
]);
3113 dev_err(rvu
->dev
, "Err %d in setup mark format %d\n",
static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	/* CN10K supports LBK FIFO size 72 KB */
	if (rvu->hw->lbk_bufsize == 0x12000)
		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}

static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
	/* RPM supports FIFO len 128 KB */
	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
	else
		*max_mtu = NIC_HW_MAX_FRS;
}
3138 int rvu_mbox_handler_nix_get_hw_info(struct rvu
*rvu
, struct msg_req
*req
,
3139 struct nix_hw_info
*rsp
)
3141 u16 pcifunc
= req
->hdr
.pcifunc
;
3145 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
3147 return NIX_AF_ERR_AF_LF_INVALID
;
3149 if (is_afvf(pcifunc
))
3150 rvu_get_lbk_link_max_frs(rvu
, &rsp
->max_mtu
);
3152 rvu_get_lmac_link_max_frs(rvu
, &rsp
->max_mtu
);
3154 rsp
->min_mtu
= NIC_HW_MIN_FRS
;
3156 if (!rvu
->hw
->cap
.nix_common_dwrr_mtu
) {
3157 /* Return '1' on OTx2 */
3158 rsp
->rpm_dwrr_mtu
= 1;
3159 rsp
->sdp_dwrr_mtu
= 1;
3163 dwrr_mtu
= rvu_read64(rvu
, BLKADDR_NIX0
, NIX_AF_DWRR_RPM_MTU
);
3164 rsp
->rpm_dwrr_mtu
= convert_dwrr_mtu_to_bytes(dwrr_mtu
);
3166 dwrr_mtu
= rvu_read64(rvu
, BLKADDR_NIX0
, NIX_AF_DWRR_SDP_MTU
);
3167 rsp
->sdp_dwrr_mtu
= convert_dwrr_mtu_to_bytes(dwrr_mtu
);
3172 int rvu_mbox_handler_nix_stats_rst(struct rvu
*rvu
, struct msg_req
*req
,
3173 struct msg_rsp
*rsp
)
3175 u16 pcifunc
= req
->hdr
.pcifunc
;
3176 int i
, nixlf
, blkaddr
, err
;
3179 err
= nix_get_nixlf(rvu
, pcifunc
, &nixlf
, &blkaddr
);
3183 /* Get stats count supported by HW */
3184 stats
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST1
);
3186 /* Reset tx stats */
3187 for (i
= 0; i
< ((stats
>> 24) & 0xFF); i
++)
3188 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_TX_STATX(nixlf
, i
), 0);
3190 /* Reset rx stats */
3191 for (i
= 0; i
< ((stats
>> 32) & 0xFF); i
++)
3192 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RX_STATX(nixlf
, i
), 0);
/* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
	int i;

	/* Scan over existing algo entries to find a match */
	for (i = 0; i < nix_hw->flowkey.in_use; i++)
		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
			return i;

	return -ERANGE;
}
3210 static int set_flowkey_fields(struct nix_rx_flowkey_alg
*alg
, u32 flow_cfg
)
3212 int idx
, nr_field
, key_off
, field_marker
, keyoff_marker
;
3213 int max_key_off
, max_bit_pos
, group_member
;
3214 struct nix_rx_flowkey_alg
*field
;
3215 struct nix_rx_flowkey_alg tmp
;
3216 u32 key_type
, valid_key
;
3217 int l4_key_offset
= 0;
3222 #define FIELDS_PER_ALG 5
3223 #define MAX_KEY_OFF 40
3224 /* Clear all fields */
3225 memset(alg
, 0, sizeof(uint64_t) * FIELDS_PER_ALG
);
3227 /* Each of the 32 possible flow key algorithm definitions should
3228 * fall into above incremental config (except ALG0). Otherwise a
3229 * single NPC MCAM entry is not sufficient for supporting RSS.
3231 * If a different definition or combination needed then NPC MCAM
3232 * has to be programmed to filter such pkts and it's action should
3233 * point to this definition to calculate flowtag or hash.
3235 * The `for loop` goes over _all_ protocol field and the following
3236 * variables depicts the state machine forward progress logic.
3238 * keyoff_marker - Enabled when hash byte length needs to be accounted
3239 * in field->key_offset update.
3240 * field_marker - Enabled when a new field needs to be selected.
3241 * group_member - Enabled when protocol is part of a group.
3244 keyoff_marker
= 0; max_key_off
= 0; group_member
= 0;
3245 nr_field
= 0; key_off
= 0; field_marker
= 1;
3246 field
= &tmp
; max_bit_pos
= fls(flow_cfg
);
3248 idx
< max_bit_pos
&& nr_field
< FIELDS_PER_ALG
&&
3249 key_off
< MAX_KEY_OFF
; idx
++) {
3250 key_type
= BIT(idx
);
3251 valid_key
= flow_cfg
& key_type
;
3252 /* Found a field marker, reset the field values */
3254 memset(&tmp
, 0, sizeof(tmp
));
3256 field_marker
= true;
3257 keyoff_marker
= true;
3259 case NIX_FLOW_KEY_TYPE_PORT
:
3260 field
->sel_chan
= true;
3261 /* This should be set to 1, when SEL_CHAN is set */
3264 case NIX_FLOW_KEY_TYPE_IPV4_PROTO
:
3265 field
->lid
= NPC_LID_LC
;
3266 field
->hdr_offset
= 9; /* offset */
3267 field
->bytesm1
= 0; /* 1 byte */
3268 field
->ltype_match
= NPC_LT_LC_IP
;
3269 field
->ltype_mask
= 0xF;
3271 case NIX_FLOW_KEY_TYPE_IPV4
:
3272 case NIX_FLOW_KEY_TYPE_INNR_IPV4
:
3273 field
->lid
= NPC_LID_LC
;
3274 field
->ltype_match
= NPC_LT_LC_IP
;
3275 if (key_type
== NIX_FLOW_KEY_TYPE_INNR_IPV4
) {
3276 field
->lid
= NPC_LID_LG
;
3277 field
->ltype_match
= NPC_LT_LG_TU_IP
;
3279 field
->hdr_offset
= 12; /* SIP offset */
3280 field
->bytesm1
= 7; /* SIP + DIP, 8 bytes */
3281 field
->ltype_mask
= 0xF; /* Match only IPv4 */
3282 keyoff_marker
= false;
3284 case NIX_FLOW_KEY_TYPE_IPV6
:
3285 case NIX_FLOW_KEY_TYPE_INNR_IPV6
:
3286 field
->lid
= NPC_LID_LC
;
3287 field
->ltype_match
= NPC_LT_LC_IP6
;
3288 if (key_type
== NIX_FLOW_KEY_TYPE_INNR_IPV6
) {
3289 field
->lid
= NPC_LID_LG
;
3290 field
->ltype_match
= NPC_LT_LG_TU_IP6
;
3292 field
->hdr_offset
= 8; /* SIP offset */
3293 field
->bytesm1
= 31; /* SIP + DIP, 32 bytes */
3294 field
->ltype_mask
= 0xF; /* Match only IPv6 */
3296 case NIX_FLOW_KEY_TYPE_TCP
:
3297 case NIX_FLOW_KEY_TYPE_UDP
:
3298 case NIX_FLOW_KEY_TYPE_SCTP
:
3299 case NIX_FLOW_KEY_TYPE_INNR_TCP
:
3300 case NIX_FLOW_KEY_TYPE_INNR_UDP
:
3301 case NIX_FLOW_KEY_TYPE_INNR_SCTP
:
3302 field
->lid
= NPC_LID_LD
;
3303 if (key_type
== NIX_FLOW_KEY_TYPE_INNR_TCP
||
3304 key_type
== NIX_FLOW_KEY_TYPE_INNR_UDP
||
3305 key_type
== NIX_FLOW_KEY_TYPE_INNR_SCTP
)
3306 field
->lid
= NPC_LID_LH
;
3307 field
->bytesm1
= 3; /* Sport + Dport, 4 bytes */
3309 /* Enum values for NPC_LID_LD and NPC_LID_LG are same,
3310 * so no need to change the ltype_match, just change
3311 * the lid for inner protocols
3313 BUILD_BUG_ON((int)NPC_LT_LD_TCP
!=
3314 (int)NPC_LT_LH_TU_TCP
);
3315 BUILD_BUG_ON((int)NPC_LT_LD_UDP
!=
3316 (int)NPC_LT_LH_TU_UDP
);
3317 BUILD_BUG_ON((int)NPC_LT_LD_SCTP
!=
3318 (int)NPC_LT_LH_TU_SCTP
);
3320 if ((key_type
== NIX_FLOW_KEY_TYPE_TCP
||
3321 key_type
== NIX_FLOW_KEY_TYPE_INNR_TCP
) &&
3323 field
->ltype_match
|= NPC_LT_LD_TCP
;
3324 group_member
= true;
3325 } else if ((key_type
== NIX_FLOW_KEY_TYPE_UDP
||
3326 key_type
== NIX_FLOW_KEY_TYPE_INNR_UDP
) &&
3328 field
->ltype_match
|= NPC_LT_LD_UDP
;
3329 group_member
= true;
3330 } else if ((key_type
== NIX_FLOW_KEY_TYPE_SCTP
||
3331 key_type
== NIX_FLOW_KEY_TYPE_INNR_SCTP
) &&
3333 field
->ltype_match
|= NPC_LT_LD_SCTP
;
3334 group_member
= true;
3336 field
->ltype_mask
= ~field
->ltype_match
;
3337 if (key_type
== NIX_FLOW_KEY_TYPE_SCTP
||
3338 key_type
== NIX_FLOW_KEY_TYPE_INNR_SCTP
) {
3339 /* Handle the case where any of the group item
3340 * is enabled in the group but not the final one
3344 group_member
= false;
3347 field_marker
= false;
3348 keyoff_marker
= false;
3351 /* TCP/UDP/SCTP and ESP/AH falls at same offset so
3352 * remember the TCP key offset of 40 byte hash key.
3354 if (key_type
== NIX_FLOW_KEY_TYPE_TCP
)
3355 l4_key_offset
= key_off
;
3357 case NIX_FLOW_KEY_TYPE_NVGRE
:
3358 field
->lid
= NPC_LID_LD
;
3359 field
->hdr_offset
= 4; /* VSID offset */
3361 field
->ltype_match
= NPC_LT_LD_NVGRE
;
3362 field
->ltype_mask
= 0xF;
3364 case NIX_FLOW_KEY_TYPE_VXLAN
:
3365 case NIX_FLOW_KEY_TYPE_GENEVE
:
3366 field
->lid
= NPC_LID_LE
;
3368 field
->hdr_offset
= 4;
3369 field
->ltype_mask
= 0xF;
3370 field_marker
= false;
3371 keyoff_marker
= false;
3373 if (key_type
== NIX_FLOW_KEY_TYPE_VXLAN
&& valid_key
) {
3374 field
->ltype_match
|= NPC_LT_LE_VXLAN
;
3375 group_member
= true;
3378 if (key_type
== NIX_FLOW_KEY_TYPE_GENEVE
&& valid_key
) {
3379 field
->ltype_match
|= NPC_LT_LE_GENEVE
;
3380 group_member
= true;
3383 if (key_type
== NIX_FLOW_KEY_TYPE_GENEVE
) {
3385 field
->ltype_mask
= ~field
->ltype_match
;
3386 field_marker
= true;
3387 keyoff_marker
= true;
3389 group_member
= false;
3393 case NIX_FLOW_KEY_TYPE_ETH_DMAC
:
3394 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC
:
3395 field
->lid
= NPC_LID_LA
;
3396 field
->ltype_match
= NPC_LT_LA_ETHER
;
3397 if (key_type
== NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC
) {
3398 field
->lid
= NPC_LID_LF
;
3399 field
->ltype_match
= NPC_LT_LF_TU_ETHER
;
3401 field
->hdr_offset
= 0;
3402 field
->bytesm1
= 5; /* DMAC 6 Byte */
3403 field
->ltype_mask
= 0xF;
3405 case NIX_FLOW_KEY_TYPE_IPV6_EXT
:
3406 field
->lid
= NPC_LID_LC
;
3407 field
->hdr_offset
= 40; /* IPV6 hdr */
3408 field
->bytesm1
= 0; /* 1 Byte ext hdr*/
3409 field
->ltype_match
= NPC_LT_LC_IP6_EXT
;
3410 field
->ltype_mask
= 0xF;
3412 case NIX_FLOW_KEY_TYPE_GTPU
:
3413 field
->lid
= NPC_LID_LE
;
3414 field
->hdr_offset
= 4;
3415 field
->bytesm1
= 3; /* 4 bytes TID*/
3416 field
->ltype_match
= NPC_LT_LE_GTPU
;
3417 field
->ltype_mask
= 0xF;
3419 case NIX_FLOW_KEY_TYPE_VLAN
:
3420 field
->lid
= NPC_LID_LB
;
3421 field
->hdr_offset
= 2; /* Skip TPID (2-bytes) */
3422 field
->bytesm1
= 1; /* 2 Bytes (Actually 12 bits) */
3423 field
->ltype_match
= NPC_LT_LB_CTAG
;
3424 field
->ltype_mask
= 0xF;
3425 field
->fn_mask
= 1; /* Mask out the first nibble */
3427 case NIX_FLOW_KEY_TYPE_AH
:
3428 case NIX_FLOW_KEY_TYPE_ESP
:
3429 field
->hdr_offset
= 0;
3430 field
->bytesm1
= 7; /* SPI + sequence number */
3431 field
->ltype_mask
= 0xF;
3432 field
->lid
= NPC_LID_LE
;
3433 field
->ltype_match
= NPC_LT_LE_ESP
;
3434 if (key_type
== NIX_FLOW_KEY_TYPE_AH
) {
3435 field
->lid
= NPC_LID_LD
;
3436 field
->ltype_match
= NPC_LT_LD_AH
;
3437 field
->hdr_offset
= 4;
3438 keyoff_marker
= false;
3444 /* Found a valid flow key type */
3446 /* Use the key offset of TCP/UDP/SCTP fields
3447 * for ESP/AH fields.
3449 if (key_type
== NIX_FLOW_KEY_TYPE_ESP
||
3450 key_type
== NIX_FLOW_KEY_TYPE_AH
)
3451 key_off
= l4_key_offset
;
3452 field
->key_offset
= key_off
;
3453 memcpy(&alg
[nr_field
], field
, sizeof(*field
));
3454 max_key_off
= max(max_key_off
, field
->bytesm1
+ 1);
3456 /* Found a field marker, get the next field */
3461 /* Found a keyoff marker, update the new key_off */
3462 if (keyoff_marker
) {
3463 key_off
+= max_key_off
;
3467 /* Processed all the flow key types */
3468 if (idx
== max_bit_pos
&& key_off
<= MAX_KEY_OFF
)
3471 return NIX_AF_ERR_RSS_NOSPC_FIELD
;
3474 static int reserve_flowkey_alg_idx(struct rvu
*rvu
, int blkaddr
, u32 flow_cfg
)
3476 u64 field
[FIELDS_PER_ALG
];
3480 hw
= get_nix_hw(rvu
->hw
, blkaddr
);
3482 return NIX_AF_ERR_INVALID_NIXBLK
;
3484 /* No room to add new flow hash algoritham */
3485 if (hw
->flowkey
.in_use
>= NIX_FLOW_KEY_ALG_MAX
)
3486 return NIX_AF_ERR_RSS_NOSPC_ALGO
;
3488 /* Generate algo fields for the given flow_cfg */
3489 rc
= set_flowkey_fields((struct nix_rx_flowkey_alg
*)field
, flow_cfg
);
3493 /* Update ALGX_FIELDX register with generated fields */
3494 for (fid
= 0; fid
< FIELDS_PER_ALG
; fid
++)
3495 rvu_write64(rvu
, blkaddr
,
3496 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw
->flowkey
.in_use
,
3499 /* Store the flow_cfg for futher lookup */
3500 rc
= hw
->flowkey
.in_use
;
3501 hw
->flowkey
.flowkey
[rc
] = flow_cfg
;
3502 hw
->flowkey
.in_use
++;
3507 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu
*rvu
,
3508 struct nix_rss_flowkey_cfg
*req
,
3509 struct nix_rss_flowkey_cfg_rsp
*rsp
)
3511 u16 pcifunc
= req
->hdr
.pcifunc
;
3512 int alg_idx
, nixlf
, blkaddr
;
3513 struct nix_hw
*nix_hw
;
3516 err
= nix_get_nixlf(rvu
, pcifunc
, &nixlf
, &blkaddr
);
3520 nix_hw
= get_nix_hw(rvu
->hw
, blkaddr
);
3522 return NIX_AF_ERR_INVALID_NIXBLK
;
3524 alg_idx
= get_flowkey_alg_idx(nix_hw
, req
->flowkey_cfg
);
3525 /* Failed to get algo index from the exiting list, reserve new */
3527 alg_idx
= reserve_flowkey_alg_idx(rvu
, blkaddr
,
3532 rsp
->alg_idx
= alg_idx
;
3533 rvu_npc_update_flowkey_alg_idx(rvu
, pcifunc
, nixlf
, req
->group
,
3534 alg_idx
, req
->mcam_index
);
3538 static int nix_rx_flowkey_alg_cfg(struct rvu
*rvu
, int blkaddr
)
3540 u32 flowkey_cfg
, minkey_cfg
;
3543 /* Disable all flow key algx fieldx */
3544 for (alg
= 0; alg
< NIX_FLOW_KEY_ALG_MAX
; alg
++) {
3545 for (fid
= 0; fid
< FIELDS_PER_ALG
; fid
++)
3546 rvu_write64(rvu
, blkaddr
,
3547 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg
, fid
),
3551 /* IPv4/IPv6 SIP/DIPs */
3552 flowkey_cfg
= NIX_FLOW_KEY_TYPE_IPV4
| NIX_FLOW_KEY_TYPE_IPV6
;
3553 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3557 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3558 minkey_cfg
= flowkey_cfg
;
3559 flowkey_cfg
= minkey_cfg
| NIX_FLOW_KEY_TYPE_TCP
;
3560 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3564 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3565 flowkey_cfg
= minkey_cfg
| NIX_FLOW_KEY_TYPE_UDP
;
3566 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3570 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3571 flowkey_cfg
= minkey_cfg
| NIX_FLOW_KEY_TYPE_SCTP
;
3572 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3576 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3577 flowkey_cfg
= minkey_cfg
| NIX_FLOW_KEY_TYPE_TCP
|
3578 NIX_FLOW_KEY_TYPE_UDP
;
3579 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3583 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3584 flowkey_cfg
= minkey_cfg
| NIX_FLOW_KEY_TYPE_TCP
|
3585 NIX_FLOW_KEY_TYPE_SCTP
;
3586 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3590 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3591 flowkey_cfg
= minkey_cfg
| NIX_FLOW_KEY_TYPE_UDP
|
3592 NIX_FLOW_KEY_TYPE_SCTP
;
3593 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3597 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3598 flowkey_cfg
= minkey_cfg
| NIX_FLOW_KEY_TYPE_TCP
|
3599 NIX_FLOW_KEY_TYPE_UDP
| NIX_FLOW_KEY_TYPE_SCTP
;
3600 rc
= reserve_flowkey_alg_idx(rvu
, blkaddr
, flowkey_cfg
);
3607 int rvu_mbox_handler_nix_set_mac_addr(struct rvu
*rvu
,
3608 struct nix_set_mac_addr
*req
,
3609 struct msg_rsp
*rsp
)
3611 bool from_vf
= req
->hdr
.pcifunc
& RVU_PFVF_FUNC_MASK
;
3612 u16 pcifunc
= req
->hdr
.pcifunc
;
3613 int blkaddr
, nixlf
, err
;
3614 struct rvu_pfvf
*pfvf
;
3616 err
= nix_get_nixlf(rvu
, pcifunc
, &nixlf
, &blkaddr
);
3620 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
3622 /* untrusted VF can't overwrite admin(PF) changes */
3623 if (!test_bit(PF_SET_VF_TRUSTED
, &pfvf
->flags
) &&
3624 (from_vf
&& test_bit(PF_SET_VF_MAC
, &pfvf
->flags
))) {
3626 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3630 ether_addr_copy(pfvf
->mac_addr
, req
->mac_addr
);
3632 rvu_npc_install_ucast_entry(rvu
, pcifunc
, nixlf
,
3633 pfvf
->rx_chan_base
, req
->mac_addr
);
3635 if (test_bit(PF_SET_VF_TRUSTED
, &pfvf
->flags
) && from_vf
)
3636 ether_addr_copy(pfvf
->default_mac
, req
->mac_addr
);
3638 rvu_switch_update_rules(rvu
, pcifunc
);
int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
				      struct msg_req *req,
				      struct nix_get_mac_addr_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;

	if (!is_nixlf_attached(rvu, pcifunc))
		return NIX_AF_ERR_AF_LF_INVALID;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	return 0;
}
3660 int rvu_mbox_handler_nix_set_rx_mode(struct rvu
*rvu
, struct nix_rx_mode
*req
,
3661 struct msg_rsp
*rsp
)
3663 bool allmulti
, promisc
, nix_rx_multicast
;
3664 u16 pcifunc
= req
->hdr
.pcifunc
;
3665 struct rvu_pfvf
*pfvf
;
3668 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
3669 promisc
= req
->mode
& NIX_RX_MODE_PROMISC
? true : false;
3670 allmulti
= req
->mode
& NIX_RX_MODE_ALLMULTI
? true : false;
3671 pfvf
->use_mce_list
= req
->mode
& NIX_RX_MODE_USE_MCE
? true : false;
3673 nix_rx_multicast
= rvu
->hw
->cap
.nix_rx_multicast
& pfvf
->use_mce_list
;
3675 if (is_vf(pcifunc
) && !nix_rx_multicast
&&
3676 (promisc
|| allmulti
)) {
3677 dev_warn_ratelimited(rvu
->dev
,
3678 "VF promisc/multicast not supported\n");
3682 /* untrusted VF can't configure promisc/allmulti */
3683 if (is_vf(pcifunc
) && !test_bit(PF_SET_VF_TRUSTED
, &pfvf
->flags
) &&
3684 (promisc
|| allmulti
))
3687 err
= nix_get_nixlf(rvu
, pcifunc
, &nixlf
, NULL
);
3691 if (nix_rx_multicast
) {
3692 /* add/del this PF_FUNC to/from mcast pkt replication list */
3693 err
= nix_update_mce_rule(rvu
, pcifunc
, NIXLF_ALLMULTI_ENTRY
,
3697 "Failed to update pcifunc 0x%x to multicast list\n",
3702 /* add/del this PF_FUNC to/from promisc pkt replication list */
3703 err
= nix_update_mce_rule(rvu
, pcifunc
, NIXLF_PROMISC_ENTRY
,
3707 "Failed to update pcifunc 0x%x to promisc list\n",
3713 /* install/uninstall allmulti entry */
3715 rvu_npc_install_allmulti_entry(rvu
, pcifunc
, nixlf
,
3716 pfvf
->rx_chan_base
);
3718 if (!nix_rx_multicast
)
3719 rvu_npc_enable_allmulti_entry(rvu
, pcifunc
, nixlf
, false);
3722 /* install/uninstall promisc entry */
3724 rvu_npc_install_promisc_entry(rvu
, pcifunc
, nixlf
,
3728 if (!nix_rx_multicast
)
3729 rvu_npc_enable_promisc_entry(rvu
, pcifunc
, nixlf
, false);
3735 static void nix_find_link_frs(struct rvu
*rvu
,
3736 struct nix_frs_cfg
*req
, u16 pcifunc
)
3738 int pf
= rvu_get_pf(pcifunc
);
3739 struct rvu_pfvf
*pfvf
;
3744 /* Update with requester's min/max lengths */
3745 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
3746 pfvf
->maxlen
= req
->maxlen
;
3747 if (req
->update_minlen
)
3748 pfvf
->minlen
= req
->minlen
;
3750 maxlen
= req
->maxlen
;
3751 minlen
= req
->update_minlen
? req
->minlen
: 0;
3753 /* Get this PF's numVFs and starting hwvf */
3754 rvu_get_pf_numvfs(rvu
, pf
, &numvfs
, &hwvf
);
3756 /* For each VF, compare requested max/minlen */
3757 for (vf
= 0; vf
< numvfs
; vf
++) {
3758 pfvf
= &rvu
->hwvf
[hwvf
+ vf
];
3759 if (pfvf
->maxlen
> maxlen
)
3760 maxlen
= pfvf
->maxlen
;
3761 if (req
->update_minlen
&&
3762 pfvf
->minlen
&& pfvf
->minlen
< minlen
)
3763 minlen
= pfvf
->minlen
;
3766 /* Compare requested max/minlen with PF's max/minlen */
3767 pfvf
= &rvu
->pf
[pf
];
3768 if (pfvf
->maxlen
> maxlen
)
3769 maxlen
= pfvf
->maxlen
;
3770 if (req
->update_minlen
&&
3771 pfvf
->minlen
&& pfvf
->minlen
< minlen
)
3772 minlen
= pfvf
->minlen
;
3774 /* Update the request with max/min PF's and it's VF's max/min */
3775 req
->maxlen
= maxlen
;
3776 if (req
->update_minlen
)
3777 req
->minlen
= minlen
;
3781 nix_config_link_credits(struct rvu
*rvu
, int blkaddr
, int link
,
3782 u16 pcifunc
, u64 tx_credits
)
3784 struct rvu_hwinfo
*hw
= rvu
->hw
;
3785 int pf
= rvu_get_pf(pcifunc
);
3786 u8 cgx_id
= 0, lmac_id
= 0;
3787 unsigned long poll_tmo
;
3788 bool restore_tx_en
= 0;
3789 struct nix_hw
*nix_hw
;
3790 u64 cfg
, sw_xoff
= 0;
3795 nix_hw
= get_nix_hw(rvu
->hw
, blkaddr
);
3797 return NIX_AF_ERR_INVALID_NIXBLK
;
3799 if (tx_credits
== nix_hw
->tx_credits
[link
])
3802 /* Enable cgx tx if disabled for credits to be back */
3803 if (is_pf_cgxmapped(rvu
, pf
)) {
3804 rvu_get_cgx_lmac_id(rvu
->pf2cgxlmac_map
[pf
], &cgx_id
, &lmac_id
);
3805 restore_tx_en
= !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id
, rvu
),
3809 mutex_lock(&rvu
->rsrc_lock
);
3810 /* Disable new traffic to link */
3811 if (hw
->cap
.nix_shaping
) {
3812 schq
= nix_get_tx_link(rvu
, pcifunc
);
3813 sw_xoff
= rvu_read64(rvu
, blkaddr
, NIX_AF_TL1X_SW_XOFF(schq
));
3814 rvu_write64(rvu
, blkaddr
,
3815 NIX_AF_TL1X_SW_XOFF(schq
), BIT_ULL(0));
3819 poll_tmo
= jiffies
+ usecs_to_jiffies(10000);
3820 /* Wait for credits to return */
3822 if (time_after(jiffies
, poll_tmo
))
3824 usleep_range(100, 200);
3826 cfg
= rvu_read64(rvu
, blkaddr
,
3827 NIX_AF_TX_LINKX_NORM_CREDIT(link
));
3828 credits
= (cfg
>> 12) & 0xFFFFFULL
;
3829 } while (credits
!= nix_hw
->tx_credits
[link
]);
3831 cfg
&= ~(0xFFFFFULL
<< 12);
3832 cfg
|= (tx_credits
<< 12);
3833 rvu_write64(rvu
, blkaddr
, NIX_AF_TX_LINKX_NORM_CREDIT(link
), cfg
);
3836 nix_hw
->tx_credits
[link
] = tx_credits
;
3839 /* Enable traffic back */
3840 if (hw
->cap
.nix_shaping
&& !sw_xoff
)
3841 rvu_write64(rvu
, blkaddr
, NIX_AF_TL1X_SW_XOFF(schq
), 0);
3843 /* Restore state of cgx tx */
3845 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id
, rvu
), lmac_id
, false);
3847 mutex_unlock(&rvu
->rsrc_lock
);
3851 int rvu_mbox_handler_nix_set_hw_frs(struct rvu
*rvu
, struct nix_frs_cfg
*req
,
3852 struct msg_rsp
*rsp
)
3854 struct rvu_hwinfo
*hw
= rvu
->hw
;
3855 u16 pcifunc
= req
->hdr
.pcifunc
;
3856 int pf
= rvu_get_pf(pcifunc
);
3857 int blkaddr
, schq
, link
= -1;
3858 struct nix_txsch
*txsch
;
3859 u64 cfg
, lmac_fifo_len
;
3860 struct nix_hw
*nix_hw
;
3861 struct rvu_pfvf
*pfvf
;
3862 u8 cgx
= 0, lmac
= 0;
3865 blkaddr
= rvu_get_blkaddr(rvu
, BLKTYPE_NIX
, pcifunc
);
3867 return NIX_AF_ERR_AF_LF_INVALID
;
3869 nix_hw
= get_nix_hw(rvu
->hw
, blkaddr
);
3871 return NIX_AF_ERR_INVALID_NIXBLK
;
3873 if (is_afvf(pcifunc
))
3874 rvu_get_lbk_link_max_frs(rvu
, &max_mtu
);
3876 rvu_get_lmac_link_max_frs(rvu
, &max_mtu
);
3878 if (!req
->sdp_link
&& req
->maxlen
> max_mtu
)
3879 return NIX_AF_ERR_FRS_INVALID
;
3881 if (req
->update_minlen
&& req
->minlen
< NIC_HW_MIN_FRS
)
3882 return NIX_AF_ERR_FRS_INVALID
;
3884 /* Check if requester wants to update SMQ's */
3885 if (!req
->update_smq
)
3888 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3889 txsch
= &nix_hw
->txsch
[NIX_TXSCH_LVL_SMQ
];
3890 mutex_lock(&rvu
->rsrc_lock
);
3891 for (schq
= 0; schq
< txsch
->schq
.max
; schq
++) {
3892 if (TXSCH_MAP_FUNC(txsch
->pfvf_map
[schq
]) != pcifunc
)
3894 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_SMQX_CFG(schq
));
3895 cfg
= (cfg
& ~(0xFFFFULL
<< 8)) | ((u64
)req
->maxlen
<< 8);
3896 if (req
->update_minlen
)
3897 cfg
= (cfg
& ~0x7FULL
) | ((u64
)req
->minlen
& 0x7F);
3898 rvu_write64(rvu
, blkaddr
, NIX_AF_SMQX_CFG(schq
), cfg
);
3900 mutex_unlock(&rvu
->rsrc_lock
);
3903 /* Check if config is for SDP link */
3904 if (req
->sdp_link
) {
3906 return NIX_AF_ERR_RX_LINK_INVALID
;
3907 link
= hw
->cgx_links
+ hw
->lbk_links
;
3911 /* Check if the request is from CGX mapped RVU PF */
3912 if (is_pf_cgxmapped(rvu
, pf
)) {
3913 /* Get CGX and LMAC to which this PF is mapped and find link */
3914 rvu_get_cgx_lmac_id(rvu
->pf2cgxlmac_map
[pf
], &cgx
, &lmac
);
3915 link
= (cgx
* hw
->lmac_per_cgx
) + lmac
;
3916 } else if (pf
== 0) {
3917 /* For VFs of PF0 ingress is LBK port, so config LBK link */
3918 pfvf
= rvu_get_pfvf(rvu
, pcifunc
);
3919 link
= hw
->cgx_links
+ pfvf
->lbkid
;
3923 return NIX_AF_ERR_RX_LINK_INVALID
;
3925 nix_find_link_frs(rvu
, req
, pcifunc
);
3928 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_RX_LINKX_CFG(link
));
3929 cfg
= (cfg
& ~(0xFFFFULL
<< 16)) | ((u64
)req
->maxlen
<< 16);
3930 if (req
->update_minlen
)
3931 cfg
= (cfg
& ~0xFFFFULL
) | req
->minlen
;
3932 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_LINKX_CFG(link
), cfg
);
3934 if (req
->sdp_link
|| pf
== 0)
3937 /* Update transmit credits for CGX links */
3939 rvu_cgx_get_fifolen(rvu
) /
3940 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx
, rvu
));
3941 return nix_config_link_credits(rvu
, blkaddr
, link
, pcifunc
,
3942 (lmac_fifo_len
- req
->maxlen
) / 16);
3945 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu
*rvu
, struct nix_rx_cfg
*req
,
3946 struct msg_rsp
*rsp
)
3948 int nixlf
, blkaddr
, err
;
3951 err
= nix_get_nixlf(rvu
, req
->hdr
.pcifunc
, &nixlf
, &blkaddr
);
3955 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_LFX_RX_CFG(nixlf
));
3956 /* Set the interface configuration */
3957 if (req
->len_verify
& BIT(0))
3960 cfg
&= ~BIT_ULL(41);
3962 if (req
->len_verify
& BIT(1))
3965 cfg
&= ~BIT_ULL(40);
3967 if (req
->csum_verify
& BIT(0))
3970 cfg
&= ~BIT_ULL(37);
3972 rvu_write64(rvu
, blkaddr
, NIX_AF_LFX_RX_CFG(nixlf
), cfg
);
static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
{
	/* CN10K supports 72KB FIFO size and max packet size of 64k */
	if (rvu->hw->lbk_bufsize == 0x12000)
		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;

	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
}
3986 static void nix_link_config(struct rvu
*rvu
, int blkaddr
,
3987 struct nix_hw
*nix_hw
)
3989 struct rvu_hwinfo
*hw
= rvu
->hw
;
3990 int cgx
, lmac_cnt
, slink
, link
;
3991 u16 lbk_max_frs
, lmac_max_frs
;
3992 u64 tx_credits
, cfg
;
3994 rvu_get_lbk_link_max_frs(rvu
, &lbk_max_frs
);
3995 rvu_get_lmac_link_max_frs(rvu
, &lmac_max_frs
);
3997 /* Set default min/max packet lengths allowed on NIX Rx links.
3999 * With HW reset minlen value of 60byte, HW will treat ARP pkts
4000 * as undersize and report them to SW as error pkts, hence
4001 * setting it to 40 bytes.
4003 for (link
= 0; link
< hw
->cgx_links
; link
++) {
4004 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_LINKX_CFG(link
),
4005 ((u64
)lmac_max_frs
<< 16) | NIC_HW_MIN_FRS
);
4008 for (link
= hw
->cgx_links
; link
< hw
->lbk_links
; link
++) {
4009 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_LINKX_CFG(link
),
4010 ((u64
)lbk_max_frs
<< 16) | NIC_HW_MIN_FRS
);
4012 if (hw
->sdp_links
) {
4013 link
= hw
->cgx_links
+ hw
->lbk_links
;
4014 rvu_write64(rvu
, blkaddr
, NIX_AF_RX_LINKX_CFG(link
),
4015 SDP_HW_MAX_FRS
<< 16 | NIC_HW_MIN_FRS
);
4018 /* Set credits for Tx links assuming max packet length allowed.
4019 * This will be reconfigured based on MTU set for PF/VF.
4021 for (cgx
= 0; cgx
< hw
->cgx
; cgx
++) {
4022 lmac_cnt
= cgx_get_lmac_cnt(rvu_cgx_pdata(cgx
, rvu
));
4023 /* Skip when cgx is not available or lmac cnt is zero */
4026 tx_credits
= ((rvu_cgx_get_fifolen(rvu
) / lmac_cnt
) -
4028 /* Enable credits and set credit pkt count to max allowed */
4029 cfg
= (tx_credits
<< 12) | (0x1FF << 2) | BIT_ULL(1);
4030 slink
= cgx
* hw
->lmac_per_cgx
;
4031 for (link
= slink
; link
< (slink
+ lmac_cnt
); link
++) {
4032 nix_hw
->tx_credits
[link
] = tx_credits
;
4033 rvu_write64(rvu
, blkaddr
,
4034 NIX_AF_TX_LINKX_NORM_CREDIT(link
), cfg
);
4038 /* Set Tx credits for LBK link */
4039 slink
= hw
->cgx_links
;
4040 for (link
= slink
; link
< (slink
+ hw
->lbk_links
); link
++) {
4041 tx_credits
= rvu_get_lbk_link_credits(rvu
, lbk_max_frs
);
4042 nix_hw
->tx_credits
[link
] = tx_credits
;
4043 /* Enable credits and set credit pkt count to max allowed */
4044 tx_credits
= (tx_credits
<< 12) | (0x1FF << 2) | BIT_ULL(1);
4045 rvu_write64(rvu
, blkaddr
,
4046 NIX_AF_TX_LINKX_NORM_CREDIT(link
), tx_credits
);
4050 static int nix_calibrate_x2p(struct rvu
*rvu
, int blkaddr
)
4055 /* Start X2P bus calibration */
4056 rvu_write64(rvu
, blkaddr
, NIX_AF_CFG
,
4057 rvu_read64(rvu
, blkaddr
, NIX_AF_CFG
) | BIT_ULL(9));
4058 /* Wait for calibration to complete */
4059 err
= rvu_poll_reg(rvu
, blkaddr
,
4060 NIX_AF_STATUS
, BIT_ULL(10), false);
4062 dev_err(rvu
->dev
, "NIX X2P bus calibration failed\n");
4066 status
= rvu_read64(rvu
, blkaddr
, NIX_AF_STATUS
);
4067 /* Check if CGX devices are ready */
4068 for (idx
= 0; idx
< rvu
->cgx_cnt_max
; idx
++) {
4069 /* Skip when cgx port is not available */
4070 if (!rvu_cgx_pdata(idx
, rvu
) ||
4071 (status
& (BIT_ULL(16 + idx
))))
4074 "CGX%d didn't respond to NIX X2P calibration\n", idx
);
4078 /* Check if LBK is ready */
4079 if (!(status
& BIT_ULL(19))) {
4081 "LBK didn't respond to NIX X2P calibration\n");
4085 /* Clear 'calibrate_x2p' bit */
4086 rvu_write64(rvu
, blkaddr
, NIX_AF_CFG
,
4087 rvu_read64(rvu
, blkaddr
, NIX_AF_CFG
) & ~BIT_ULL(9));
4088 if (err
|| (status
& 0x3FFULL
))
4090 "NIX X2P calibration failed, status 0x%llx\n", status
);
4096 static int nix_aq_init(struct rvu
*rvu
, struct rvu_block
*block
)
4101 /* Set admin queue endianness */
4102 cfg
= rvu_read64(rvu
, block
->addr
, NIX_AF_CFG
);
4105 rvu_write64(rvu
, block
->addr
, NIX_AF_CFG
, cfg
);
4108 rvu_write64(rvu
, block
->addr
, NIX_AF_CFG
, cfg
);
4111 /* Do not bypass NDC cache */
4112 cfg
= rvu_read64(rvu
, block
->addr
, NIX_AF_NDC_CFG
);
4114 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4115 /* Disable caching of SQB aka SQEs */
4118 rvu_write64(rvu
, block
->addr
, NIX_AF_NDC_CFG
, cfg
);
4120 /* Result structure can be followed by RQ/SQ/CQ context at
4121 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
4122 * operation type. Alloc sufficient result memory for all operations.
4124 err
= rvu_aq_alloc(rvu
, &block
->aq
,
4125 Q_COUNT(AQ_SIZE
), sizeof(struct nix_aq_inst_s
),
4126 ALIGN(sizeof(struct nix_aq_res_s
), 128) + 256);
4130 rvu_write64(rvu
, block
->addr
, NIX_AF_AQ_CFG
, AQ_SIZE
);
4131 rvu_write64(rvu
, block
->addr
,
4132 NIX_AF_AQ_BASE
, (u64
)block
->aq
->inst
->iova
);
4136 static void rvu_nix_setup_capabilities(struct rvu
*rvu
, int blkaddr
)
4138 struct rvu_hwinfo
*hw
= rvu
->hw
;
4141 hw_const
= rvu_read64(rvu
, blkaddr
, NIX_AF_CONST1
);
4143 /* On OcteonTx2 DWRR quantum is directly configured into each of
4144 * the transmit scheduler queues. And PF/VF drivers were free to
4145 * config any value upto 2^24.
4146 * On CN10K, HW is modified, the quantum configuration at scheduler
4147 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4148 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4149 * 'DWRR MTU * weight' to get the quantum.
4151 * Check if HW uses a common MTU for all DWRR quantum configs.
4152 * On OcteonTx2 this register field is '0'.
4154 if (((hw_const
>> 56) & 0x10) == 0x10)
4155 hw
->cap
.nix_common_dwrr_mtu
= true;
4158 static int rvu_nix_block_init(struct rvu
*rvu
, struct nix_hw
*nix_hw
)
4160 const struct npc_lt_def_cfg
*ltdefs
;
4161 struct rvu_hwinfo
*hw
= rvu
->hw
;
4162 int blkaddr
= nix_hw
->blkaddr
;
4163 struct rvu_block
*block
;
4167 block
= &hw
->block
[blkaddr
];
4169 if (is_rvu_96xx_B0(rvu
)) {
4170 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4171 * internal state when conditional clocks are turned off.
4172 * Hence enable them.
4174 rvu_write64(rvu
, blkaddr
, NIX_AF_CFG
,
4175 rvu_read64(rvu
, blkaddr
, NIX_AF_CFG
) | 0x40ULL
);
4177 /* Set chan/link to backpressure TL3 instead of TL2 */
4178 rvu_write64(rvu
, blkaddr
, NIX_AF_PSE_CHANNEL_LEVEL
, 0x01);
4180 /* Disable SQ manager's sticky mode operation (set TM6 = 0)
4181 * This sticky mode is known to cause SQ stalls when multiple
4182 * SQs are mapped to same SMQ and transmitting pkts at a time.
4184 cfg
= rvu_read64(rvu
, blkaddr
, NIX_AF_SQM_DBG_CTL_STATUS
);
4185 cfg
&= ~BIT_ULL(15);
4186 rvu_write64(rvu
, blkaddr
, NIX_AF_SQM_DBG_CTL_STATUS
, cfg
);
4189 ltdefs
= rvu
->kpu
.lt_def
;
4190 /* Calibrate X2P bus to check if CGX/LBK links are fine */
4191 err
= nix_calibrate_x2p(rvu
, blkaddr
);
4195 /* Setup capabilities of the NIX block */
4196 rvu_nix_setup_capabilities(rvu
, blkaddr
);
4198 /* Initialize admin queue */
4199 err
= nix_aq_init(rvu
, block
);
4203 /* Restore CINT timer delay to HW reset values */
4204 rvu_write64(rvu
, blkaddr
, NIX_AF_CINT_DELAY
, 0x0ULL
);
4206 /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
4207 rvu_write64(rvu
, blkaddr
, NIX_AF_SEB_CFG
, 0x1ULL
);
4209 if (is_block_implemented(hw
, blkaddr
)) {
4210 err
= nix_setup_txschq(rvu
, nix_hw
, blkaddr
);
4214 err
= nix_setup_ipolicers(rvu
, nix_hw
, blkaddr
);
4218 err
= nix_af_mark_format_setup(rvu
, nix_hw
, blkaddr
);
4222 err
= nix_setup_mcast(rvu
, nix_hw
, blkaddr
);
4226 err
= nix_setup_txvlan(rvu
, nix_hw
);

		/* Configure segmentation offload formats */
		nix_setup_lso(rvu, nix_hw, blkaddr);

		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
		 * This helps HW protocol checker to identify headers
		 * and validate length and checksums.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
			    ltdefs->rx_ol2.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
			    ltdefs->rx_oip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
			    ltdefs->rx_iip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
			    ltdefs->rx_oip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
			    ltdefs->rx_iip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
			    ltdefs->rx_otcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
			    ltdefs->rx_itcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
			    ltdefs->rx_oudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
			    ltdefs->rx_iudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
			    ltdefs->rx_osctp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
			    ltdefs->rx_isctp.ltype_mask);

		if (!is_rvu_otx2(rvu)) {
			/* Enable APAD calculation for other protocols
			 * matching APAD0 and APAD1 lt def registers.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
				    (ltdefs->rx_apad0.valid << 11) |
				    (ltdefs->rx_apad0.lid << 8) |
				    (ltdefs->rx_apad0.ltype_match << 4) |
				    ltdefs->rx_apad0.ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
				    (ltdefs->rx_apad1.valid << 11) |
				    (ltdefs->rx_apad1.lid << 8) |
				    (ltdefs->rx_apad1.ltype_match << 4) |
				    ltdefs->rx_apad1.ltype_mask);

			/* Receive ethertype definition register defines layer
			 * information in NPC_RESULT_S to identify the Ethertype
			 * location in L2 header. Used for Ethertype overwriting
			 * in inline IPsec flow.
			 */
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
				    (ltdefs->rx_et[0].offset << 12) |
				    (ltdefs->rx_et[0].valid << 11) |
				    (ltdefs->rx_et[0].lid << 8) |
				    (ltdefs->rx_et[0].ltype_match << 4) |
				    ltdefs->rx_et[0].ltype_mask);
			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
				    (ltdefs->rx_et[1].offset << 12) |
				    (ltdefs->rx_et[1].valid << 11) |
				    (ltdefs->rx_et[1].lid << 8) |
				    (ltdefs->rx_et[1].ltype_match << 4) |
				    ltdefs->rx_et[1].ltype_mask);
		}

		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
		if (err)
			return err;

		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
					     sizeof(u64), GFP_KERNEL);
		if (!nix_hw->tx_credits)
			return -ENOMEM;

		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
		nix_link_config(rvu, blkaddr, nix_hw);

		/* Enable Channel backpressure */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
	}

	return 0;
}
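
/* Probe-time NIX AF initialization: allocate per-block nix_hw state and run
 * rvu_nix_block_init() on every NIX block discovered on this silicon.
 */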
int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}
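
/* Free all software state tied to a single NIX block: AQ memory, Tx
 * scheduler bitmaps, link credits, ingress policers, Tx VLAN and
 * multicast/MCE resources.
 */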
static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		kfree(nix_hw->tx_credits);

		nix_ipolicer_freemem(rvu, nix_hw);

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}
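
/* Walk every NIX block on this silicon and release its per-block resources */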
void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}
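
/* Mbox handler: enable this NIX LF's default NPC MCAM entries and installed
 * flows, mark the LF initialized and start CGX Rx/Tx for its link.
 */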
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	set_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_switch_update_rules(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
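
/* Mbox handler: disable the LF's NPC MCAM entries and stop CGX Rx/Tx */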
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
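
/* Teardown path for a NIX LF: release NPC MCAM state, free Tx scheduler
 * queues, disable SQ/RQ/CQ hardware contexts and free the LF's bandwidth
 * profiles.
 */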
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	ctx_req.ctype = NIX_AQ_CTYPE_SQ;
	err = nix_lf_hwctx_disable(rvu, &ctx_req);
	if (err)
		dev_err(rvu->dev, "SQ ctx disable failed\n");

	ctx_req.ctype = NIX_AQ_CTYPE_RQ;
	err = nix_lf_hwctx_disable(rvu, &ctx_req);
	if (err)
		dev_err(rvu->dev, "RQ ctx disable failed\n");

	ctx_req.ctype = NIX_AQ_CTYPE_CQ;
	err = nix_lf_hwctx_disable(rvu, &ctx_req);
	if (err)
		dev_err(rvu->dev, "CQ ctx disable failed\n");

	nix_ctx_free(rvu, pfvf);

	nix_free_all_bandprof(rvu, pcifunc);
}
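
/* Enable or disable PTP timestamping on the LF's transmit path by toggling
 * the PTP_EN bit in NIX_AF_LFX_TX_CFG.
 */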
#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, pf, nixlf;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}

int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}
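
/* Mbox handler: return an LSO format index for the requested field layout,
 * reusing an already programmed format if one matches, otherwise programming
 * a new NIX_AF_LSO_FORMATX_FIELDX entry.
 */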
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	/* Find existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found, reuse this format index */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}
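
/* Restore a VF's MAC address to its default when the request comes from
 * a VF (i.e. the function bits of the PCIFUNC are non-zero).
 */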
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);

	/* overwrite vf mac address with default_mac */
	if (from_vf)
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}

/* NIX ingress policers or bandwidth profiles APIs */
static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
{
	struct npc_lt_def_cfg defs, *ltdefs;

	ltdefs = &defs;
	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));

	/* Extract PCP and DEI fields from outer VLAN from byte offset
	 * 2 from the start of LB_PTR (ie TAG).
	 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
	 * fields are considered when 'Tunnel enable' is set in profile.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
		    (ltdefs->ovlan.ltype_match << 4) |
		    ltdefs->ovlan.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
		    (ltdefs->ivlan.ltype_match << 4) |
		    ltdefs->ivlan.ltype_mask);

	/* DSCP field in outer and tunneled IPv4 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
		    (ltdefs->rx_oip4.ltype_match << 4) |
		    ltdefs->rx_oip4.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
		    (ltdefs->rx_iip4.ltype_match << 4) |
		    ltdefs->rx_iip4.ltype_mask);

	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
		    (ltdefs->rx_oip6.ltype_match << 4) |
		    ltdefs->rx_oip6.ltype_mask);
	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
		    (ltdefs->rx_iip6.ltype_match << 4) |
		    ltdefs->rx_iip6.ltype_mask);
}
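
/* Submit an all-zeroes INIT instruction for one bandwidth profile so that
 * PF/VF drivers only need a WRITE op later to set policer rates and config.
 */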
static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
				    int layer, int prof_idx)
{
	struct nix_cn10k_aq_enq_req aq_req;
	int rc;

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));

	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_INIT;

	/* Context is all zeros, submit to AQ */
	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc)
		dev_err(rvu->dev,
			"Failed to INIT bandwidth profile layer %d profile %d\n",
			layer, prof_idx);

	return rc;
}
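
/* Discover and set up NIX ingress policers (bandwidth profiles): read the
 * per-layer profile counts from NIX_AF_PL_CONST, allocate tracking bitmaps
 * and ownership maps, INIT every profile context and program the policer
 * time unit and pre-color extraction.
 */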
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_ipolicer *ipolicer;
	int err, layer, prof_idx;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	if (!(cfg & BIT_ULL(61))) {
		hw->cap.ipolicer = false;
		return 0;
	}

	hw->cap.ipolicer = true;
	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
					sizeof(*ipolicer), GFP_KERNEL);
	if (!nix_hw->ipolicer)
		return -ENOMEM;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];
		switch (layer) {
		case BAND_PROF_LEAF_LAYER:
			ipolicer->band_prof.max = cfg & 0XFFFF;
			break;
		case BAND_PROF_MID_LAYER:
			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
			break;
		case BAND_PROF_TOP_LAYER:
			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
			break;
		}

		if (!ipolicer->band_prof.max)
			continue;

		err = rvu_alloc_bitmap(&ipolicer->band_prof);
		if (err)
			return err;

		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->pfvf_map)
			return -ENOMEM;

		ipolicer->match_id = devm_kcalloc(rvu->dev,
						  ipolicer->band_prof.max,
						  sizeof(u16), GFP_KERNEL);
		if (!ipolicer->match_id)
			return -ENOMEM;

		for (prof_idx = 0;
		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
			/* Set AF as current owner for INIT ops to succeed */
			ipolicer->pfvf_map[prof_idx] = 0x00;

			/* There is no enable bit in the profile context,
			 * so no context disable. So let's INIT them here
			 * so that PF/VF later on have to just do WRITE to
			 * setup policer rates and config.
			 */
			err = nix_init_policer_context(rvu, nix_hw,
						       layer, prof_idx);
			if (err)
				return err;
		}

		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles, this will be needed for leaf layer profiles'
		 * aggregation.
		 */
		if (layer != BAND_PROF_MID_LAYER)
			continue;

		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
		if (!ipolicer->ref_count)
			return -ENOMEM;
	}

	/* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);

	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);

	return 0;
}

static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_ipolicer *ipolicer;
	int layer;

	if (!rvu->hw->cap.ipolicer)
		return;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		ipolicer = &nix_hw->ipolicer[layer];

		if (!ipolicer->band_prof.max)
			continue;

		kfree(ipolicer->band_prof.bmap);
	}
}
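
/* Validate a bandwidth profile AQ request. The profile index encodes the
 * layer in bits [15:14] and the profile number in bits [13:0]; for example
 * qidx = (BAND_PROF_MID_LAYER << 14) | 5 refers to mid-layer profile 5.
 */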
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc)
{
	struct nix_ipolicer *ipolicer;
	int layer, hi_layer, prof_idx;

	/* Bits [15:14] in profile index represent layer */
	layer = (req->qidx >> 14) & 0x03;
	prof_idx = req->qidx & 0x3FFF;

	ipolicer = &nix_hw->ipolicer[layer];
	if (prof_idx >= ipolicer->band_prof.max)
		return -EINVAL;

	/* Check if the profile is allocated to the requesting PCIFUNC or not
	 * with the exception of AF. AF is allowed to read and update contexts.
	 */
	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	/* If this profile is linked to higher layer profile then check
	 * if that profile is also allocated to the requesting PCIFUNC.
	 */
	if (!req->prof.hl_en)
		return 0;

	/* Leaf layer profile can link only to mid layer and
	 * mid layer to top layer.
	 */
	if (layer == BAND_PROF_LEAF_LAYER)
		hi_layer = BAND_PROF_MID_LAYER;
	else if (layer == BAND_PROF_MID_LAYER)
		hi_layer = BAND_PROF_TOP_LAYER;
	else
		return -EINVAL;

	ipolicer = &nix_hw->ipolicer[hi_layer];
	prof_idx = req->prof.band_prof_id;
	if (prof_idx >= ipolicer->band_prof.max ||
	    ipolicer->pfvf_map[prof_idx] != pcifunc)
		return -EINVAL;

	return 0;
}

int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
					struct nix_bandprof_alloc_req *req,
					struct nix_bandprof_alloc_rsp *rsp)
{
	int blkaddr, layer, prof, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;

			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
			if (prof < 0)
				break;
			rsp->prof_count[layer]++;
			rsp->prof_idx[layer][idx] = prof;
			ipolicer->pfvf_map[prof] = pcifunc;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
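
/* Release every bandwidth profile owned by 'pcifunc' across all layers,
 * clearing any rate-limit aggregation set up on leaf profiles.
 */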
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, layer, prof_idx, err;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free all the profiles allocated to the PCIFUNC */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		ipolicer = &nix_hw->ipolicer[layer];

		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}

int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
				       struct nix_bandprof_free_req *req,
				       struct msg_rsp *rsp)
{
	int blkaddr, layer, prof_idx, idx, err;
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;

	if (req->free_all)
		return nix_free_all_bandprof(rvu, pcifunc);

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	mutex_lock(&rvu->rsrc_lock);
	/* Free the requested profile indices */
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		if (!req->prof_count[layer])
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			prof_idx = req->prof_idx[layer][idx];
			if (prof_idx >= ipolicer->band_prof.max ||
			    ipolicer->pfvf_map[prof_idx] != pcifunc)
				continue;

			/* Clear ratelimit aggregation, if any */
			if (layer == BAND_PROF_LEAF_LAYER &&
			    ipolicer->match_id[prof_idx])
				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);

			ipolicer->pfvf_map[prof_idx] = 0x00;
			ipolicer->match_id[prof_idx] = 0;
			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
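
/* Issue an AQ READ for the given context type and queue index on behalf of
 * 'pcifunc' and return the hardware context in 'aq_rsp'.
 */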
int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
			struct nix_cn10k_aq_enq_req *aq_req,
			struct nix_cn10k_aq_enq_rsp *aq_rsp,
			u16 pcifunc, u8 ctype, u32 qidx)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = pcifunc;
	aq_req->ctype = ctype;
	aq_req->op = NIX_AQ_INSTOP_READ;
	aq_req->qidx = qidx;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
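
/* Link a leaf bandwidth profile to a mid-layer profile by writing the
 * band_prof_id and hl_en fields of the leaf profile's context.
 */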
static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
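
/* Aggregate rate limiting across RQs: when two leaf profiles carry the same
 * 'match_id', attach both of them to a common mid-layer profile so that flows
 * steered to different RQs share one policer.
 */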
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get Mid layer prof index and map leaf_prof index
		 * also such that flows that are being steered
		 * to different RQs and marked with same match_id
		 * are rate limited in an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

/* Called with mutex rsrc_lock */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}
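
/* Mbox handler: report the number of free bandwidth profiles per layer and
 * the policer time unit (in nanoseconds) to the requesting PF/VF.
 */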
int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
					     struct nix_bandprof_get_hwinfo_rsp *rsp)
{
	struct nix_ipolicer *ipolicer;
	int blkaddr, layer, err;
	struct nix_hw *nix_hw;
	u64 tu;

	if (!rvu->hw->cap.ipolicer)
		return NIX_AF_ERR_IPOLICER_NOTSUPP;

	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
	if (err)
		return err;

	/* Return number of bandwidth profiles free at each layer */
	mutex_lock(&rvu->rsrc_lock);
	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;

		ipolicer = &nix_hw->ipolicer[layer];
		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Set the policer timeunit in nanosec */
	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
	rsp->policer_timeunit = (tu + 1) * 100;

	return 0;
}