/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"
/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};
#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);
/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
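
/*
 * A worked example of the two helpers above (the mask value is
 * illustrative, not a real register field): with mask = 0x0000ff00ull,
 *
 *   GETFIELD(0x0000ff00ull, 0x00001200ull) == 0x12
 *   SETFIELD(0x0000ff00ull, 0x0ull, 0x34)  == 0x3400
 *
 * ctz64(mask) locates the least significant set bit of the mask, so
 * the field is shifted down to bit 0 on extraction and shifted back
 * up (and re-masked) on insertion.
 */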
/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}
/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_remote(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (pnv_xive_block_id(xive) == blk) {
            return xive;
        }
    }
    return NULL;
}
/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
        return 0;
    }

    return vst_addr + idx * info->size;
}
static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
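
/*
 * Illustration of the indirect lookup arithmetic (example values, not
 * from the HW docs): with 64K subpages (page_shift = 16) and 32-byte
 * NVT entries, vst_per_page = 65536 / 32 = 2048. Looking up idx = 5000
 * reads VSD 5000 / 2048 = 2 of the indirect array, then entry
 * 5000 % 2048 = 904 of the direct subpage it points to.
 */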
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_remote(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}
static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}
#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}
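
/*
 * The watch engines update structures either fully or one 4-byte word
 * at a time. For instance, a hypothetical caller updating only word 2
 * of an END would use:
 *
 *   pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, &end, 2);
 *
 * which stores bytes 8..11 of the entry, while XIVE_VST_WORD_ALL
 * rewrites the whole entry.
 */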
static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}
static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}
static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}
static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}
/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * the first cores 0-15 (normal) of the chip or 0-7 (fused). The
 * second register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}
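
/*
 * Worked example with a hypothetical PIR value of 0x20: the low six
 * bits select PPC_BIT(0x20 & 0x3f) = PPC_BIT(32), and, assuming
 * PNV9_PIR2FUSEDCORE() maps this PIR to a fused core below 8, the
 * enablement bit is looked up in PC_THREAD_EN_REG0.
 */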
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates, that we
             * don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}
static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}
/*
 * The TIMA MMIO space is shared among the chips and to identify the
 * chip from which the access is being done, we extract the chip id
 * from the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    PnvChip *chip;
    PnvXive *xive;

    chip = pnv_get_chip(PNV9_PIR2CHIP(pir));
    assert(chip);
    xive = &PNV9_CHIP(chip)->xive;

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}
/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}
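
/*
 * The BARM registers hold the two's complement of the region size, so
 * "~reg + 1" recovers it. For example (an illustrative value, not a
 * default), a CQ_VC_BARM value of 0xffffff0000000000 yields a VC
 * region of 0x10000000000 bytes (1 TB), assuming the mask keeps those
 * bits.
 */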
static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
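
/*
 * Each SBE byte holds the PQ bits of SBE_PER_BYTE (4) sources, i.e. 2
 * bits per source. So a direct 4K SBE table (VSD_TSIZE = 0) covers
 * 4096 * 4 = 16384 IPIs, and a 64K table covers 256K. Indirect SBE
 * tables are not accounted for (the function returns 0).
 */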
/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry !?", info->name);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
/*
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}
/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
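
/*
 * Remapping example (a hypothetical EDT layout): with sets 0-1 of
 * type IPI followed by sets 2-3 of type EQ, a VC access in set 2
 * targets the END space and the two preceding IPI sets are
 * subtracted, i.e. edt_offset = vc_offset - 2 * edt_size. Each ESB
 * address space thus sees its own pages as contiguous.
 */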
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}
/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
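
/*
 * The firmware programs a table by selecting it in CQ_TAR and then
 * streaming entries through CQ_TDR. A sketch of the expected MMIO
 * sequence (register values are illustrative):
 *
 *   write CQ_TAR = CQ_TAR_TBL_AUTOINC | CQ_TAR_TSEL_EDT | index 0
 *   write CQ_TDR = EDT entry 0
 *   write CQ_TDR = EDT entry 1        (the index auto-increments)
 *   ...
 *
 * When the last EDT entry is written, the IPI and END MMIO windows
 * are resized to match the new layout.
 */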
/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}
/*
 * Both PC and VC sub-engines are configured as each uses the Virtual
 * Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
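
/*
 * VSD encoding example (illustrative values): to declare a direct,
 * exclusive 64K END table at 0x10000000, the FW would select
 * VST_TSEL_EQDT and a block in VC_VSD_TABLE_ADDR, then write a VSD
 * with VSD_MODE = VSD_MODE_EXCLUSIVE, the table address in the
 * VSD_ADDRESS_MASK bits, and VSD_TSIZE = 4 (64K = 1 << (4 + 12)).
 */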
/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}
static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:

    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}
static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI        0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW         0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC     0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC     0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS      0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5          0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6          0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7          0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI           0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW            0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC        0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC        0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS         0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL          0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH          0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC           0xf80 /* Sync remove VPC store */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}
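
/*
 * Example of the trigger data handled above (illustrative values): a
 * device raising source 0x10 of block 2 stores a value with
 * XIVE_TRIGGER_END clear and the EAS block/index fields set as in
 * XIVE_EAS(2, 0x10), which is forwarded verbatim to the router.
 */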
static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}
static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and for debug purpose also.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}
static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x\n", nvt_idx,
                   eq_blk, eq_idx,
                   xive_get_field32(NVT_W4_IPB, nvt->w4));
}
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    monitor_printf(mon, "XIVE[%x] #%d Source %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] #%d EAT %08x .. %08x\n", chip_id, blk,
                   srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVTT %08x .. %08x\n", chip_id, blk,
                   0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, mon);
        }
    }
}
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
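
/*
 * For example (the exact VC BAR size depends on the machine
 * definition): a 32 GB VC region divided by the 128K of MMIO taken by
 * each pair of 64K ESB pages would give 256K possible IRQs, and as
 * many ENDs.
 */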
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    /* device_class_set_parent_realize() already installs dc->realize */
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}
static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .class_size    = sizeof(PnvXiveClass),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)