// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "rvu_trace.h"

#define DRV_NAME	"rvu_af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);
static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->cap.nix_shaper_toggle_wait = false;

	if (is_rvu_pre_96xx_C0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}
	if (!is_rvu_pre_96xx_C0(rvu))
		hw->cap.nix_shaper_toggle_wait = true;

	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	bool twice = false;
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	/* In scenarios where CPU is scheduled out before checking
	 * 'time_before' (above) and gets scheduled in such that
	 * jiffies are beyond timeout value, then check again if HW is
	 * done with the operation in the meantime.
	 */
	if (!twice) {
		twice = true;
		goto again;
	}
	return -EBUSY;
}
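/* Usage sketch (illustrative only): callers pair a CSR write with a poll
 * on the completion bit, exactly as rvu_lf_reset() below does for an LF
 * reset:
 *
 *	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
 *	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg,
 *			   BIT_ULL(12), true);
 */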
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return false;

	return !test_bit(id, rsrc->bmap);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
	kfree(rsrc->bmap);
}
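/* Usage sketch (illustrative only): a block sizes 'lf.max' from a HW
 * constant register, allocates the bitmap once, then hands out IDs:
 *
 *	block->lf.max = cfg & 0xFFF;		// e.g. from NIX_AF_CONST2
 *	err = rvu_alloc_bitmap(&block->lf);
 *	lf = rvu_alloc_rsrc(&block->lf);	// take one LF
 *	rvu_free_rsrc(&block->lf, lf);		// give it back
 */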
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}
/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 supports
 * multiple blocks of same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc if LFs are attached from multiple blocks of same type, then
 * return blkaddr of first encountered block.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	bool is_pf = false;
	u64 cfg, reg;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}
static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				int lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
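/* Worked example (assuming the usual pcifunc layout, PF in the bits above
 * RVU_PFVF_PF_SHIFT == 10 and the VF-function number in bits [9:0]):
 * pcifunc 0x0C03 decodes to PF3, VF2, since 0x0C03 >> 10 == 3 and
 * (0x0C03 & 0x3FF) - 1 == 2. A FUNC field of zero means the PF itself.
 */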
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	if (numvfs)
		*numvfs = (cfg >> 12) & 0xFF;
	if (hwvf)
		*hwvf = cfg & 0xFFF;
}
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}
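/* Worked example (illustrative): if RVU_PRIV_PFX_CFG(pf) reports the PF's
 * first HWVF as 8, its VF2 (FUNC field == 3) maps to HWVF 8 + 3 - 1 == 10,
 * i.e. hwvf == first_hwvf + vf_idx.
 */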
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}
static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];
	int err;

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
	if (err)
		dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
}
static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get, to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}
static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}
static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		goto out;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

out:
	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
		if (!pf)
			goto lbkvf;
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC address to VFs*/
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}
static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}
static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}
static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* cache fifo size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	pci_dev_put(pdev);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NPA LF bitmap\n", __func__);
		return err;
	}

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
		return err;
	}

	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
		return err;
	}

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSO LF bitmap\n", __func__);
		return err;
	}

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
		return err;
	}

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate TIM LF bitmap\n", __func__);
		return err;
	}

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
		return err;
	}
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
		return err;
	}

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to setup MSIX resources\n", __func__);
		return err;
	}

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
		goto npc_err;
	}

	err = rvu_cgx_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
		goto cgx_err;
	}

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
		goto npa_err;
	}

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
		goto nix_err;
	}

	err = rvu_sdp_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
		goto nix_err;
	}

	rvu_program_channels(rvu);

	return 0;

nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}
/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
}
/* Get current count of a RVU block's LF/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}
/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return !!pfvf->sso;
	case BLKTYPE_SSOW:
		return !!pfvf->ssow;
	case BLKTYPE_TIM:
		return !!pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs || pfvf->cpt1_lfs;
	}
	return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has a LF of type blktype attached */
	if (!is_blktype_attached(pfvf, blktype))
		return false;

	return true;
}
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);

	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
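/* Illustration of the lookup-register handshake above (bit positions as
 * coded in rvu_lookup_rsrc(), not a full CSR spec): software writes
 * [pcifunc:24][slot:16] with the trigger at bit 13, spins until HW clears
 * bit 13, then bit 12 reports "LF valid" and bits [11:0] carry the LF
 * number for that slot.
 */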
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	/* All CGX mapped PFs are set with assigned NIX block during init */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered gets NIX1
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
		/* NIX1 is not present on all silicons */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	/* if SDP1 then the blkaddr is NIX1 */
	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
		blkaddr = BLKADDR_NIX1;

	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}
static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
				  u16 pcifunc, struct rsrc_attach *attach)
{
	int blkaddr;

	switch (blktype) {
	case BLKTYPE_NIX:
		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
		break;
	case BLKTYPE_CPT:
		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
			return rvu_get_blkaddr(rvu, blktype, 0);
		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
			  BLKADDR_CPT0;
		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
			return -ENODEV;
		break;
	default:
		return rvu_get_blkaddr(rvu, blktype, 0);
	}

	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;

	return -ENODEV;
}
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int free_lfs, mappedlfs, blkaddr;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;

	/* Only one NPA LF can be attached */
	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}
static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
				       struct rsrc_attach *attach)
{
	int blkaddr, num_lfs;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
					 attach->hdr.pcifunc, attach);
	if (blkaddr < 0)
		return false;

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
					blkaddr);
	/* Requester already has LFs from given block ? */
	return !!num_lfs;
}
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees as slot 0,1,2. So for a 'modify'
		 * request, simply detach all existing attached LFs/slots
		 * and attach a fresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
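/* Usage sketch (hypothetical request; field names come from struct
 * rsrc_attach as used above): a function asking for one NPA LF, one NIX
 * LF and two SSO LFs would send roughly:
 *
 *	struct rsrc_attach req = {
 *		.npalf = 1,
 *		.nixlf = 1,
 *		.sso = 2,
 *		.modify = 0,	// first request: existing LFs get detached
 *	};
 */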
static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (lf < 0)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}
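/* Note on the msix_lfmap encoding (assumption: MSIX_BLKLF() packs blkaddr
 * and LF into one u16, along the lines of (blkaddr << 8) | lf): each
 * vector slot remembers which block LF owns it, so rvu_get_msix_offset()
 * above can recover the offset by scanning for a matching
 * MSIX_BLKLF(blkaddr, lf) value.
 */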
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* Get BLKADDR from which LFs are attached to pcifunc */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}
int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
				   struct free_rsrcs_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	mutex_lock(&rvu->rsrc_lock);

	block = &hw->block[BLKADDR_NPA];
	rsp->npa = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX0];
	rsp->nix = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX1];
	rsp->nix1 = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSO];
	rsp->sso = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSOW];
	rsp->ssow = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_TIM];
	rsp->tim = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT0];
	rsp->cpt = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT1];
	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);

	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
	} else {
		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);

		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;

		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);
	}

	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
out:
	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
			    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u16 vf, numvfs;
	u64 cfg;

	vf = pcifunc & RVU_PFVF_FUNC_MASK;
	cfg = rvu_read64(rvu, BLKADDR_RVUM,
			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
	numvfs = (cfg >> 12) & 0xFF;

	if (vf && vf <= numvfs)
		__rvu_flr_handler(rvu, pcifunc);
	else
		return RVU_INVALID_VF_ID;

	return 0;
}
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
				struct get_hw_cap_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;

	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
	rsp->nix_shaping = hw->cap.nix_shaping;

	return 0;
}
int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, nixlf;
	u16 target;

	/* Only PF can add VF permissions */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
		return -EOPNOTSUPP;

	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
	pfvf = rvu_get_pfvf(rvu, target);

	if (req->flags & RESET_VF_PERM) {
		pfvf->flags &= RVU_CLEAR_VF_PERM;
	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
		 (req->flags & VF_TRUSTED)) {
		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
		/* disable multicast and promisc entries */
		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
			if (blkaddr < 0)
				return 0;
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   target, 0);
			if (nixlf < 0)
				return 0;
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_ALLMULTI_ENTRY,
						     false);
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_PROMISC_ENTRY,
						     false);
		}
	}

	return 0;
}
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
	int err;

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			mbox, devid,					\
			sizeof(struct _rsp_type));			\
		/* some handlers should complete even if reply */	\
		/* could not be allocated */				\
		if (!rsp &&						\
		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
		    _id != MBOX_MSG_VF_FLR)				\
			return -ENOMEM;					\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
						    (struct _req_type *)req, \
						    rsp);		\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		trace_otx2_msg_process(mbox->pdev, _id, err);		\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}
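/* Illustration (not compiled): assuming mbox.h carries the usual entry
 * M(READY, 0x001, ready, msg_req, ready_msg_rsp), the M() macro above
 * expands to roughly:
 *
 *	case 0x001: {
 *		struct ready_msg_rsp *rsp;
 *		...
 *		err = rvu_mbox_handler_ready(rvu, (struct msg_req *)req,
 *					     rsp);
 *		...
 *	}
 */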
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, devid);
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFVF);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}
static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For cn10k platform VF mailbox regions of a PF follows after the
	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
	 * RVU_PF_VF_BAR4_ADDR register.
	 */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (hw->cap.per_pf_mbox_regs) {
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
				       MBOX_SIZE;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
	 * PF registers. Whereas for Octeontx2 it is read from
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
	for (region = 0; region < num; region++) {
		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *))
{
	int err = -EINVAL, i, dir, dir_up;
	void __iomem *reg_base;
	struct rvu_work *mwork;
	void **mbox_regions;
	const char *name;

	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
	if (!mbox_regions)
		return -ENOMEM;

	switch (type) {
	case TYPE_AFPF:
		name = "rvu_afpf_mailbox";
		dir = MBOX_DIR_AFPF;
		dir_up = MBOX_DIR_AFPF_UP;
		reg_base = rvu->afreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
		if (err)
			goto free_regions;
		break;
	case TYPE_AFVF:
		name = "rvu_afvf_mailbox";
		dir = MBOX_DIR_PFVF;
		dir_up = MBOX_DIR_PFVF_UP;
		reg_base = rvu->pfreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
		if (err)
			goto free_regions;
		break;
	default:
		goto free_regions;
	}

	mw->mbox_wq = alloc_workqueue(name,
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      num);
	if (!mw->mbox_wq) {
		err = -ENOMEM;
		goto unmap_regions;
	}

	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
				       sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
				     reg_base, dir, num);
	if (err)
		goto exit;

	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
				     reg_base, dir_up, num);
	if (err)
		goto exit;

	for (i = 0; i < num; i++) {
		mwork = &mw->mbox_wrk[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_handler);

		mwork = &mw->mbox_wrk_up[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_up_handler);
	}
	kfree(mbox_regions);
	return 0;

exit:
	destroy_workqueue(mw->mbox_wq);
unmap_regions:
	while (num--)
		iounmap((void __iomem *)mbox_regions[num]);
free_regions:
	kfree(mbox_regions);
	return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
	struct otx2_mbox *mbox = &mw->mbox;
	struct otx2_mbox_dev *mdev;
	int devid;

	if (mw->mbox_wq) {
		flush_workqueue(mw->mbox_wq);
		destroy_workqueue(mw->mbox_wq);
		mw->mbox_wq = NULL;
	}

	for (devid = 0; devid < mbox->ndevs; devid++) {
		mdev = &mbox->dev[devid];
		if (mdev->hwbase)
			iounmap((void __iomem *)mdev->hwbase);
	}

	otx2_mbox_destroy(&mw->mbox);
	otx2_mbox_destroy(&mw->mbox_up);
}
static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
		 * pf->mbox.up_num_msgs holds the data for use in
		 * pfaf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}
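/* Example of the 'first' offset (see the interrupt handler below): with
 * more than 64 VFs, bit 0 of RVU_PF_VFPF_MBOX_INTX(1) belongs to VF64, so
 * the caller passes first == 64 and rvu_queue_work() tests
 * intr & BIT_ULL(i - first) to line devids back up with interrupt bits.
 */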
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}
static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int err;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return;
	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Cleanup LF and reset it */
		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
		else if (block->addr == BLKADDR_NPA)
			rvu_npa_lf_teardown(rvu, pcifunc, lf);
		else if ((block->addr == BLKADDR_CPT0) ||
			 (block->addr == BLKADDR_CPT1))
			rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);

		err = rvu_lf_reset(rvu, block, lf);
		if (err)
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, lf);
	}
}
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_reset_lmt_map_tbl(rvu, pcifunc);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}
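
/* FLR for one of AF's own VFs. AF is PF0, so the VF's pcifunc is simply
 * (vf + 1). The VFTRPEND/VFFLR CSRs come in two 64-bit sets, hence the
 * register index flips to 1 for VFs numbered 64 and above.
 */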
static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}

	/* Signal FLR finish and enable IRQ */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
static void rvu_flr_handler(struct work_struct *work)
{
	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = flrwork->rvu;
	u16 pcifunc, numvfs, vf;
	u64 cfg;
	int pf;

	pf = flrwork - rvu->flr_wrk;
	if (pf >= rvu->hw->total_pfs) {
		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
		return;
	}

	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	numvfs = (cfg >> 12) & 0xFF;
	pcifunc = pf << RVU_PFVF_PF_SHIFT;

	for (vf = 0; vf < numvfs; vf++)
		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

	__rvu_flr_handler(rvu, pcifunc);

	/* Signal FLR finish */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

	/* Enable interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}
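
/* AF VF FLR work items sit after the PF entries in flr_wrk, which is why
 * the device index below is offset by hw->total_pfs (and why
 * rvu_flr_handler() treats indexes >= total_pfs as AF VFs).
 */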
static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
	int dev, vf, reg = 0;
	u64 intr;

	if (start_vf >= 64)
		reg = 1;

	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
	if (!intr)
		return;

	for (vf = 0; vf < numvfs; vf++) {
		if (!(intr & BIT_ULL(vf)))
			continue;
		/* Clear and disable the interrupt */
		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));

		dev = vf + start_vf + rvu->hw->total_pfs;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
	}
}
static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable the interrupt */
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
			/* PF is already dead do only AF related operations */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
		}
	}

afvf_flr:
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}
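
/* ME (Master Enable) interrupts fire when a PF/VF toggles bus mastering.
 * No state needs rebuilding for that; the handlers below only clear the
 * transaction-pending (TRPEND) bit and acknowledge the interrupt.
 */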
static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
	int vf;

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (vf = 0; vf < 64; vf++) {
		if (intr & (1ULL << vf)) {
			/* clear the trpend due to ME(master enable) */
			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
			/* clear interrupt */
			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
		}
	}
}
/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}
/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear the trpend due to ME(master enable) */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}
static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq]) {
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
			rvu->irq_allocated[irq] = false;
		}
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}
static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and overlapping AF vectors.
	 */
	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
	       offset;
}
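
/* MSI-X layout assumed below: AF's own vectors (mbox, PF FLR, PF ME)
 * start at vector 0, while the vectors RVU assigned to PF0 begin at the
 * offset programmed in RVU_PRIV_PF(0)_INT_CFG; the AFVF mbox/FLR/ME
 * handlers are registered relative to that offset.
 */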
static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret, offset, pf_vec_start;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Enable FLR interrupt for all PFs*/
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Clear TRPEND bit for all PF */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Enable ME interrupt for all PFs*/
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");

	rvu->irq_allocated[offset] = true;

	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");

	rvu->irq_allocated[offset] = true;

	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;
	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}
static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		flush_workqueue(rvu->flr_wq);
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}
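
/* The RVU_PRIV_PF(x)_CFG write below sets BIT_ULL(22); going by the
 * "Enable FLR for all PFs" comment this is the per-PF FLR enable bit.
 * Actual FLR handling is serialized by flr_lock in __rvu_flr_handler().
 */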
static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs*/
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}
static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}
static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
int rvu_get_num_lbk_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}
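
/* AF's VFs are backed by loopback (LBK) channel pairs, so the number of
 * VFs that can usefully be enabled is bounded by the LBK channel count
 * read above.
 */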
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = rvu_get_num_lbk_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* LBK channel number 63 is used for switching packets between
	 * CGX mapped VFs. Hence limit LBK pairs till 62 only.
	 */
	if (vfs > 62)
		vfs = 62;

	/* Save VFs number for reference in VF interrupts handlers.
	 * Since interrupts might start arriving during SRIOV enablement
	 * ordinary API cannot be used to get number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}
static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}
static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
	strscpy(rvu->kpu_pfl_name,
		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
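
/* Example usage of the profile parameters consumed by
 * rvu_update_module_params() above (profile names here are hypothetical):
 *
 *   modprobe rvu_af mkex_profile=my_mkex kpu_profile=my_kpu
 *
 * When a parameter is not given, the "default" profile name is used.
 */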
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err) {
		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
		goto err_hwsetup;
	}

	err = rvu_flr_init(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
		goto err_mbox;
	}

	err = rvu_register_interrupts(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
		goto err_flr;
	}

	err = rvu_register_dl(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register devlink\n", __func__);
		goto err_irq;
	}

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
		goto err_dl;
	}

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	mutex_init(&rvu->rswitch.switch_lock);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}
static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};
static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&ptp_driver);
	if (err < 0)
		goto ptp_err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;

	return 0;
rvu_err:
	pci_unregister_driver(&ptp_driver);
ptp_err:
	pci_unregister_driver(&cgx_driver);

	return err;
}
static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);