/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"
/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t    size;
    uint32_t    max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled) :
     *
     * 0 - IPI,
     * 1 - HWD,
     * 2 - First escalate,
     * 3 - Second escalate,
     * 4 - Redistribution,
     * 5 - IPI cascaded queue ?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};
#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);
/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * TODO: It might be better to use the existing extract64() and
 * deposit64() but this means that all the register definitions will
 * change and become incompatible with the ones found in skiboot.
 *
 * Keep it as it is for now until we find a common ground.
 */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}
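/*
 * Example (illustration only, not part of the original source): with a
 * mask of 0x0000ff00ull, GETFIELD(0x0000ff00ull, 0x1234) returns 0x12
 * and SETFIELD(0x0000ff00ull, 0x0, 0x56) returns 0x5600.
 */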
/*
 * Remote access to controllers. HW uses MMIOs. For now, a simple scan
 * of the chips is good enough.
 *
 * TODO: Block scope support
 */
static PnvXive *pnv_xive_get_ic(uint8_t blk)
{
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    int i;

    for (i = 0; i < pnv->num_chips; i++) {
        Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
        PnvXive *xive = &chip9->xive;

        if (xive->chip->chip_id == blk) {
            return xive;
        }
    }
    return NULL;
}
/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of same
 * size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}
static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
        return 0;
    }

    return vst_addr + idx * info->size;
}
static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    vsd = ldq_be_dma(&address_space_memory, vsd_addr);

    if (!(vsd & VSD_ADDRESS_MASK)) {
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        vsd = ldq_be_dma(&address_space_memory, vsd_addr);

        if (!(vsd & VSD_ADDRESS_MASK)) {
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differ !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}
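/*
 * Worked example (illustration only, assuming 32-byte END entries): with
 * 64K indirect pages (page_shift = 16), vst_per_page is 2048, so END
 * index 5000 is looked up through VSD #2 of the indirect table
 * (5000 / 2048) and is entry 904 (5000 % 2048) of the direct page that
 * VSD points to.
 */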
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        xive = pnv_xive_get_ic(blk);

        return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0;
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}
static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    cpu_physical_memory_read(addr, data, info->size);
    return 0;
}
#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        cpu_physical_memory_write(addr, data, info->size);
    } else {
        cpu_physical_memory_write(addr + word_number * 4,
                                  data + word_number * 4, 4);
    }
    return 0;
}
static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4];
    int i;

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}
static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}
static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}
static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t  blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}
static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_get_ic(blk) != xive) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore, logic_serv);
            /*
             * Save the context and follow on to catch duplicates, that we
             * don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}
static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    PnvXive *xive = NULL;
    CPUPPCState *env = &cpu->env;
    int pir = env->spr_cb[SPR_PIR].default_value;

    /*
     * Perform an extra check on the HW thread enablement.
     *
     * The TIMA is shared among the chips and to identify the chip
     * from which the access is being done, we extract the chip id
     * from the PIR.
     */
    xive = pnv_xive_get_ic((pir >> 8) & 0xf);
    if (!xive) {
        return NULL;
    }

    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
    }

    return tctx;
}
/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = xive->chip->chip_id;

    xive_router_notify(xn, XIVE_EAS(blk, srcno));
}
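/*
 * Example (illustration only, assuming the usual 4-bit block / 28-bit
 * index packing of XIVE_EAS()): an IPI with local source number 0x10
 * raised on chip 1 is forwarded to the router as global interrupt
 * number XIVE_EAS(1, 0x10) = 0x10000010, which selects block 1 of the
 * EAT.
 */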
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}
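/*
 * Example (illustration only): a direct 64K SBE table provisions
 * 64K * SBE_PER_BYTE = 256K PQ entries, i.e. 256K IPI numbers for the
 * block. Indirect SBE tables are not accounted for here.
 */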
/*
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}
/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}
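/*
 * Worked example (illustration only, assuming a 4GB VC region split
 * into 64 sets of 64MB): if sets 0 and 1 are END (EQ) sets and set 2
 * is the first IPI set, a VC access at offset 0x8001000 falls in set 2
 * and is remapped to offset 0x1000 of the IPI address space, the two
 * preceding non-IPI sets having been subtracted.
 */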
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}
/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}
615 static void pnv_xive_vst_set_exclusive(PnvXive
*xive
, uint8_t type
,
616 uint8_t blk
, uint64_t vsd
)
618 XiveENDSource
*end_xsrc
= &xive
->end_source
;
619 XiveSource
*xsrc
= &xive
->ipi_source
;
620 const XiveVstInfo
*info
= &vst_infos
[type
];
621 uint32_t page_shift
= GETFIELD(VSD_TSIZE
, vsd
) + 12;
622 uint64_t vst_tsize
= 1ull << page_shift
;
623 uint64_t vst_addr
= vsd
& VSD_ADDRESS_MASK
;
627 if (VSD_INDIRECT
& vsd
) {
628 if (!(xive
->regs
[VC_GLOBAL_CONFIG
>> 3] & VC_GCONF_INDIRECT
)) {
629 xive_error(xive
, "VST: %s indirect tables are not enabled",
634 if (!pnv_xive_vst_page_size_allowed(page_shift
)) {
635 xive_error(xive
, "VST: invalid %s page shift %d", info
->name
,
641 if (!QEMU_IS_ALIGNED(vst_addr
, 1ull << page_shift
)) {
642 xive_error(xive
, "VST: %s table address 0x%"PRIx64
" is not aligned with"
643 " page shift %d", info
->name
, vst_addr
, page_shift
);
647 /* Record the table configuration (in SRAM on HW) */
648 xive
->vsds
[type
][blk
] = vsd
;
650 /* Now tune the models with the configuration provided by the FW */
653 case VST_TSEL_IVT
: /* Nothing to be done */
658 * Backing store pages for the END.
660 * If the table is direct, we can compute the number of PQ
661 * entries provisioned by FW (such as skiboot) and resize the
662 * END ESB window accordingly.
664 if (!(VSD_INDIRECT
& vsd
)) {
665 memory_region_set_size(&end_xsrc
->esb_mmio
, (vst_tsize
/ info
->size
)
666 * (1ull << xsrc
->esb_shift
));
668 memory_region_add_subregion(&xive
->end_edt_mmio
, 0,
669 &end_xsrc
->esb_mmio
);
674 * Backing store pages for the source PQ bits. The model does
675 * not use these PQ bits backed in RAM because the XiveSource
678 * If the table is direct, we can compute the number of PQ
679 * entries provisioned by FW (such as skiboot) and resize the
680 * ESB window accordingly.
682 if (!(VSD_INDIRECT
& vsd
)) {
683 memory_region_set_size(&xsrc
->esb_mmio
, vst_tsize
* SBE_PER_BYTE
684 * (1ull << xsrc
->esb_shift
));
686 memory_region_add_subregion(&xive
->ipi_edt_mmio
, 0, &xsrc
->esb_mmio
);
689 case VST_TSEL_VPDT
: /* Not modeled */
690 case VST_TSEL_IRQ
: /* Not modeled */
692 * These tables contains the backing store pages for the
693 * interrupt fifos of the VC sub-engine in case of overflow.
698 g_assert_not_reached();
/*
 * Both PC and VC sub-engines are configured the same way, as each uses
 * the Virtual Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}
/*
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */
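/*
 * Example (illustration only, derived from the CQ_IC_BAR handling
 * below): with the default 4K page size (ic_shift = 12), the
 * sub-engine registers sit at ic_base + 0x0000, the notify page at
 * ic_base + 0x1000, the LSI pages at ic_base + 0x2000 and the indirect
 * TIMA pages at ic_base + 0x4000. With CQ_IC_BAR_64K set, the same
 * layout is simply scaled by 16.
 */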
/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}
static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:

    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * previous CWATCH_SPEC write
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * previous CWATCH_SPEC write
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}
static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI     0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW      0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC  0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC  0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS   0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5       0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6       0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7       0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI        0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW         0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC     0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC     0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS      0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL       0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH       0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC        0xf80 /* Sync remove VPC store */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                   addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
}
static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}
static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured in the IC registers beforehand. This is
 * also used for resets and for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}
static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] END Escalation EAT\n", blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, mon);
    }
}
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overridden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}
/*
 * Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), OBJECT(xive), "xive",
                             &error_abort);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_tctx = pnv_xive_get_tctx;

    xnc->notify = pnv_xive_notify;
    xpc->match_nvt = pnv_xive_match_nvt;
}
static const TypeInfo pnv_xive_info = {
    .name          = TYPE_PNV_XIVE,
    .parent        = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init    = pnv_xive_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    },
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)