/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"

#include "pnv_xive_regs.h"
/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE 4
typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;
static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};
#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);
/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}
static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
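
/*
 * Illustrative example (not part of the original source): with
 * mask = 0x0000ff00, ctz64(mask) is 8, so
 *
 *   GETFIELD(0x0000ff00, 0x12345678)       == 0x56
 *   SETFIELD(0x0000ff00, 0x12345678, 0xab) == 0x1234ab78
 *
 * The mask both selects and positions the field, which is what keeps
 * the register definitions source-compatible with skiboot's.
 */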
/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}
/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8
/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
        return 0;
    }

    return vst_addr + idx * info->size;
}
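
/*
 * Worked example (illustrative, assuming 32-byte END entries): a VSD
 * with VSD_TSIZE = 4 describes a 1 << (4 + 12) = 64K direct table,
 * i.e. 2048 entries, and entry 5 resolves to vst_addr + 5 * 32.
 */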
static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
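
/*
 * Illustrative walk-through: with 64K indirect pages holding 32-byte
 * entries, vst_per_page is 2048. A lookup of idx = 5000 loads VSD
 * number 5000 / 2048 = 2 from the indirect array, then resolves
 * entry 5000 % 2048 = 904 in that direct page.
 */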
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_ic(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}
static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}
#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}
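
/*
 * Usage note (illustrative): callers pass XIVE_VST_WORD_ALL to write
 * a full entry, or a word index to update a single 4-byte word in
 * place; e.g. word_number = 1 rewrites only bytes 4-7 of the entry.
 */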
static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}
static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}
static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}
static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}
static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}
static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_get_ic(blk) != xive) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}
/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * the first cores of the chip: 0-15 (normal) or 0-7 (fused). The
 * second register covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}
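
/*
 * Illustrative example (assuming PNV9_PIR2FUSEDCORE extracts the core
 * number from PIR bits 3-6): PIR 0x23 yields fused core 4, which is
 * below 8, so the thread is checked against bit 0x23 & 0x3f = 35 of
 * PC_THREAD_EN_REG0.
 */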
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates, which
             * we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}
static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips and to identify the chip
     * from which the access is being done, we extract the chip id
     * from the PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}
/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = xive->chip->chip_id;

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}
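
/*
 * Illustrative example: on chip 1, IPI source number 0x20 is
 * presented to the router as XIVE_EAS(1, 0x20), i.e. with the block
 * id encoded in the upper bits of the global interrupt number.
 */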
/*
 * XIVE helpers
 */

static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}
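
/*
 * Illustrative note: the BARM registers hold the complemented size
 * mask, so ~reg + 1 recovers the region size. A 64M VC region is
 * advertised as ~(64M - 1) and pnv_xive_vc_size() returns 0x4000000,
 * which pnv_xive_edt_shift() then divides across the 64 EDT sets.
 */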
static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
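
/*
 * Illustrative example: the SBE stores 2 PQ bits per interrupt, hence
 * SBE_PER_BYTE = 4 entries per byte, so a direct 64K SBE table
 * provisions 64K * 4 = 256K IPI numbers for the block.
 */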
/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */
/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}
/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
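
/*
 * Worked example (illustrative): with 64 EDT sets over a 64M VC
 * region, each set covers 1M. If sets 0 and 1 are END sets and set 2
 * is the first IPI set, a VC offset of 0x210000 remaps to
 * 0x210000 - 2 * 0x100000 = 0x10000 in the IPI address space.
 */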
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}
/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
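
/*
 * Usage note (illustrative): firmware can select CQ_TAR_TSEL_EDT with
 * CQ_TAR_TBL_AUTOINC set and stream all 64 entries through CQ_TDR;
 * the index self-increments and the write of the last entry (index
 * reaching ARRAY_SIZE(xive->edt)) triggers pnv_xive_edt_resize().
 */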
/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}
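
/*
 * Sizing example (illustrative): a direct 64K SBE table provisions
 * 64K * SBE_PER_BYTE = 256K interrupts, so the IPI ESB window above
 * is resized to 256K * (1 << xsrc->esb_shift) bytes, keeping guest
 * accesses within what the firmware actually provisioned.
 */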
/*
 * Both PC and VC sub-engines are configured, as each makes use of the
 * Virtual Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}
static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BARM:
    case CQ_PC_BAR:
    case CQ_VC_BARM:
    case CQ_VC_BAR:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:

    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}
static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI     0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW      0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC  0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC  0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS   0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5       0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6       0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7       0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI        0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW         0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC     0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC     0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS      0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL       0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH       0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC        0xf80 /* Sync remove VPC store */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}
static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}
static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}
static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}
static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}
static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }
}
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_tctx = pnv_xive_get_tctx;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}
static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)