/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xics_spapr.h"
#include "cpu-models.h"
#include "sysemu/kvm.h"

#include "trace.h"

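/*
 * MSI IRQ numbers are handed out from a bitmap of 'nr_msis' bits. The
 * allocator is only initialized by spapr_irq_init() when the machine
 * does not use the legacy XICS allocation scheme, and allocated IRQ
 * numbers start at SPAPR_IRQ_MSI.
 */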
void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}

int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area().
     */
    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align ? num - 1 : 0);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}

void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}

void spapr_irq_msi_reset(SpaprMachineState *spapr)
{
    bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
}

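/*
 * Create the KVM device for the selected backend. Failure is only
 * fatal when kernel_irqchip=on was requested; with kernel_irqchip
 * merely allowed, QEMU falls back to the emulated controller and
 * prints a warning.
 */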
static void spapr_irq_init_kvm(SpaprMachineState *spapr,
                               SpaprIrq *irq, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        irq->init_kvm(spapr, &local_err);
        if (local_err && machine_kernel_irqchip_required(machine)) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            error_propagate(errp, local_err);
            return;
        }

        if (!local_err) {
            return;
        }

        /*
         * We failed to initialize the KVM device, fallback to
         * emulated mode
         */
        error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
        warn_report_err(local_err);
    }
}

/*
 * XICS IRQ backend.
 */

static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    Object *obj;
    Error *local_err = NULL;

    obj = object_new(TYPE_ICS_SIMPLE);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_fatal);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr->ics = ICS_BASE(obj);

    xics_spapr_init(spapr);
}

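/* An ICS IRQ source is free when it has no XICS_FLAGS_IRQ_* flag set */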
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    ICSState *ics = spapr->ics;

    assert(ics);

    if (!ics_valid_irq(ics, irq)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }

    if (!ICS_IRQ_FREE(ics, irq - ics->offset)) {
        error_setg(errp, "IRQ %d is not free", irq);
        return -1;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return 0;
}

static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ICS_IRQ_FREE(ics, i)) {
                trace_spapr_irq_free_warn(0, i);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}

static qemu_irq spapr_qirq_xics(SpaprMachineState *spapr, int irq)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;

    if (ics_valid_irq(ics, irq)) {
        return spapr->qirqs[srcno];
    }

    return NULL;
}

static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}

static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->icp = ICP(obj);
}

static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
{
    if (!kvm_irqchip_in_kernel()) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(spapr_cpu_state(cpu)->icp);
        }
    }
    return 0;
}

static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    ics_simple_set_irq(spapr->ics, srcno, val);
}

static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr)
{
    return XICS_NODENAME;
}

static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        xics_kvm_connect(spapr, errp);
    }
}

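/*
 * The XICS IRQ number space runs from XICS_IRQ_BASE up to
 * XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS; everything above
 * SPAPR_IRQ_MSI in that range can be used for MSIs, hence the
 * NR_MSIS value below.
 */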
#define SPAPR_IRQ_XICS_NR_IRQS 0x1000
#define SPAPR_IRQ_XICS_NR_MSIS \
    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)

SpaprIrq spapr_irq_xics = {
    .nr_irqs = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
    .set_irq = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm = spapr_irq_init_kvm_xics,
};

/*
 * XIVE IRQ backend.
 */
static void spapr_irq_init_xive(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}

static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }
    return 0;
}

static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq, int num)
{
    int i;

    for (i = irq; i < irq + num; ++i) {
        spapr_xive_irq_free(spapr->xive, i);
    }
}

static qemu_irq spapr_qirq_xive(SpaprMachineState *spapr, int irq)
{
    SpaprXive *xive = spapr->xive;

    if (irq >= xive->nr_irqs) {
        return NULL;
    }

    /* The sPAPR machine/device should have claimed the IRQ before */
    assert(xive_eas_is_valid(&xive->eat[irq]));

    return spapr->qirqs[irq];
}

static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
                                      Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    }

    spapr_xive_pic_print_info(spapr->xive, mon);
}

static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);

    /*
     * (TCG) Set the OS CAM line early for hotplugged CPUs as they
     * don't benefit from the reset of the XIVE IRQ backend
     */
    spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
}

static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}

static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
{
    CPUState *cs;
    Error *local_err = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}

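/*
 * Forward the source trigger either to the in-kernel KVM XIVE device
 * or to the emulated XIVE source.
 */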
static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&spapr->xive->source, srcno, val);
    } else {
        xive_source_set_irq(&spapr->xive->source, srcno, val);
    }
}

static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr)
{
    return spapr->xive->nodename;
}

static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        kvmppc_xive_connect(spapr->xive, errp);
    }
}

/*
 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 * with XICS.
 */

#define SPAPR_IRQ_XIVE_NR_IRQS 0x2000
#define SPAPR_IRQ_XIVE_NR_MSIS (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)

SpaprIrq spapr_irq_xive = {
    .nr_irqs = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_EXPLOIT,

    .init = spapr_irq_init_xive,
    .claim = spapr_irq_claim_xive,
    .free = spapr_irq_free_xive,
    .qirq = spapr_qirq_xive,
    .print_info = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load = spapr_irq_post_load_xive,
    .reset = spapr_irq_reset_xive,
    .set_irq = spapr_irq_set_irq_xive,
    .get_nodename = spapr_irq_get_nodename_xive,
    .init_kvm = spapr_irq_init_kvm_xive,
};

/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode objects, XIVE and XICS, are created, but the
 * machine starts in legacy interrupt mode (XICS). The mode can be
 * changed by the CAS negotiation process and, in that case, the new
 * mode is activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

static void spapr_irq_init_dual(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xics.init(spapr, spapr_irq_xics.nr_irqs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xive.init(spapr, spapr_irq_xive.nr_irqs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

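/*
 * Claim the IRQ in both backends: the interrupt mode the guest will
 * use is only known after CAS has been negotiated.
 */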
static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}

static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq, int num)
{
    spapr_irq_xics.free(spapr, irq, num);
    spapr_irq_xive.free(spapr, irq, num);
}

static qemu_irq spapr_qirq_dual(SpaprMachineState *spapr, int irq)
{
    return spapr_irq_current(spapr)->qirq(spapr, irq);
}

static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}

static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}

static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}

static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    /* Destroy all KVM devices */
    if (kvm_irqchip_in_kernel()) {
        xics_kvm_disconnect(spapr, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XICS disconnect failed: ");
            return;
        }
        kvmppc_xive_disconnect(spapr->xive, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XIVE disconnect failed: ");
            return;
        }
    }

    spapr_irq_current(spapr)->reset(spapr, errp);
}

static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
}

static const char *spapr_irq_get_nodename_dual(SpaprMachineState *spapr)
{
    return spapr_irq_current(spapr)->get_nodename(spapr);
}

594 * Define values in sync with the XIVE and XICS backend
595 */
596#define SPAPR_IRQ_DUAL_NR_IRQS 0x2000
#define SPAPR_IRQ_DUAL_NR_IRQS 0x2000
#define SPAPR_IRQ_DUAL_NR_MSIS (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)

SpaprIrq spapr_irq_dual = {
    .nr_irqs = SPAPR_IRQ_DUAL_NR_IRQS,
    .nr_msis = SPAPR_IRQ_DUAL_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_BOTH,

    .init = spapr_irq_init_dual,
    .claim = spapr_irq_claim_dual,
    .free = spapr_irq_free_dual,
    .qirq = spapr_qirq_dual,
    .print_info = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load = spapr_irq_post_load_dual,
    .reset = spapr_irq_reset_dual,
    .set_irq = spapr_irq_set_irq_dual,
    .get_nodename = spapr_irq_get_nodename_dual,
    .init_kvm = NULL, /* should not be used */
};

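/*
 * Sanity check the chosen interrupt mode against the CPU model and the
 * host KVM capabilities before the machine initializes.
 */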
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return;
        }

        /*
         * An XIVE-only setup on a non-P9 machine is bogus. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup: DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect this early to avoid QEMU exiting later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return;
    }
}

/*
 * sPAPR IRQ frontend routines for devices
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    spapr_irq_check(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, spapr->irq->nr_irqs, errp);

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_irqs);
}

int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}

void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}

qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}

int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}

void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}

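/* Look up the phandle of the interrupt controller node in the device tree */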
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = spapr->irq->get_nodename(spapr);
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s", nodename,
                   fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}

/*
 * XICS legacy routines - to deprecate one day
 */

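/*
 * Scan the ICS for a block of 'num' consecutive free IRQs, trying each
 * candidate block on an 'alignnum' boundary. Returns the source number
 * of the first IRQ of the block, or -1 if none is available.
 */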
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated contiguously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS 0x400

SpaprIrq spapr_irq_xics_legacy = {
    .nr_irqs = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
    .set_irq = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm = spapr_irq_init_kvm_xics,
};