/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xics_spapr.h"
#include "hw/qdev-properties.h"
#include "cpu-models.h"
#include "sysemu/kvm.h"

#include "trace.h"

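/*
 * Set up the bitmap allocator used to hand out MSI interrupt numbers
 * to the PHBs. 'nr_msis' is the number of MSIs supported by the
 * active interrupt backend.
 */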
void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}

int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    align -= 1;

    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}

void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}

void spapr_irq_msi_reset(SpaprMachineState *spapr)
{
    bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
}

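/*
 * Try to create the KVM (in-kernel) variant of the interrupt
 * controller. If it fails and kernel_irqchip was explicitly required,
 * the error is propagated to the caller; otherwise a warning is
 * printed and we fall back to the emulated controller.
 */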
static void spapr_irq_init_kvm(SpaprMachineState *spapr,
                               SpaprIrq *irq, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        irq->init_kvm(spapr, &local_err);
        if (local_err && machine_kernel_irqchip_required(machine)) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            error_propagate(errp, local_err);
            return;
        }

        if (!local_err) {
            return;
        }

        /*
         * We failed to initialize the KVM device, fall back to
         * emulated mode
         */
        error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
        error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n");
        warn_report_err(local_err);
    }
}

/*
 * XICS IRQ backend.
 */

static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    Object *obj;
    Error *local_err = NULL;

    obj = object_new(TYPE_ICS_SIMPLE);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_fatal);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr->ics = ICS_BASE(obj);

    xics_spapr_init(spapr);
}

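/* An IRQ number is free when no XICS_FLAGS_IRQ_* flag is set on its source */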
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    ICSState *ics = spapr->ics;

    assert(ics);

    if (!ics_valid_irq(ics, irq)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }

    if (!ICS_IRQ_FREE(ics, irq - ics->offset)) {
        error_setg(errp, "IRQ %d is not free", irq);
        return -1;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return 0;
}

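/*
 * Reset the ICSIRQState of each IRQ in the range. Freeing an IRQ that
 * was not claimed only triggers a trace event.
 */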
static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ICS_IRQ_FREE(ics, i)) {
                trace_spapr_irq_free_warn(0, i);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}

static qemu_irq spapr_qirq_xics(SpaprMachineState *spapr, int irq)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;

    if (ics_valid_irq(ics, irq)) {
        return spapr->qirqs[srcno];
    }

    return NULL;
}

static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}

static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->icp = ICP(obj);
}

static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
{
    if (!kvm_irqchip_in_kernel()) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(spapr_cpu_state(cpu)->icp);
        }
    }
    return 0;
}

static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    ics_simple_set_irq(spapr->ics, srcno, val);
}

static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr)
{
    return XICS_NODENAME;
}

static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        xics_kvm_connect(spapr, errp);
    }
}

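/*
 * The XICS backend exposes 0x1000 interrupt numbers starting at
 * XICS_IRQ_BASE. Everything above SPAPR_IRQ_MSI in that range is
 * available for MSIs.
 */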
#define SPAPR_IRQ_XICS_NR_IRQS 0x1000
#define SPAPR_IRQ_XICS_NR_MSIS \
    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)

SpaprIrq spapr_irq_xics = {
    .nr_irqs = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
    .set_irq = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm = spapr_irq_init_kvm_xics,
};

/*
 * XIVE IRQ backend.
 */
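/*
 * Create the sPAPR XIVE device, claim the IPIs used for CPU
 * interrupts and register the XIVE hcalls.
 */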
static void spapr_irq_init_xive(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}

static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }
    return 0;
}

static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq, int num)
{
    int i;

    for (i = irq; i < irq + num; ++i) {
        spapr_xive_irq_free(spapr->xive, i);
    }
}

static qemu_irq spapr_qirq_xive(SpaprMachineState *spapr, int irq)
{
    SpaprXive *xive = spapr->xive;

    if (irq >= xive->nr_irqs) {
        return NULL;
    }

    /* The sPAPR machine/device should have claimed the IRQ before */
    assert(xive_eas_is_valid(&xive->eat[irq]));

    return spapr->qirqs[irq];
}

static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
                                      Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
    }

    spapr_xive_pic_print_info(spapr->xive, mon);
}

static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);

    /*
     * (TCG) Set the OS CAM line early for hotplugged CPUs as they
     * don't benefit from the reset of the XIVE IRQ backend
     */
    spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
}

static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}

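/*
 * Reset the XIVE backend: reprogram the OS CAM line of each vCPU
 * (TCG), reconnect the KVM device if allowed and enable the XIVE
 * MMIO regions.
 */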
static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
{
    CPUState *cs;
    Error *local_err = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}

static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&spapr->xive->source, srcno, val);
    } else {
        xive_source_set_irq(&spapr->xive->source, srcno, val);
    }
}

static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr)
{
    return spapr->xive->nodename;
}

static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        kvmppc_xive_connect(spapr->xive, errp);
    }
}

/*
 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 * with XICS.
 */

#define SPAPR_IRQ_XIVE_NR_IRQS 0x2000
#define SPAPR_IRQ_XIVE_NR_MSIS (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)

SpaprIrq spapr_irq_xive = {
    .nr_irqs = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_EXPLOIT,

    .init = spapr_irq_init_xive,
    .claim = spapr_irq_claim_xive,
    .free = spapr_irq_free_xive,
    .qirq = spapr_qirq_xive,
    .print_info = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load = spapr_irq_post_load_xive,
    .reset = spapr_irq_reset_xive,
    .set_irq = spapr_irq_set_irq_xive,
    .get_nodename = spapr_irq_get_nodename_xive,
    .init_kvm = spapr_irq_init_kvm_xive,
};

/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode objects, XIVE and XICS, are created, but the
 * machine starts in legacy interrupt mode (XICS). The mode can be
 * changed by the CAS negotiation process and, in that case, the new
 * mode is activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

static void spapr_irq_init_dual(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xics.init(spapr, spapr_irq_xics.nr_irqs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xive.init(spapr, spapr_irq_xive.nr_irqs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

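/*
 * Claim the IRQ number in both backends so that it stays reserved
 * whichever interrupt mode the guest ends up negotiating.
 */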
static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}

static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq, int num)
{
    spapr_irq_xics.free(spapr, irq, num);
    spapr_irq_xive.free(spapr, irq, num);
}

static qemu_irq spapr_qirq_dual(SpaprMachineState *spapr, int irq)
{
    return spapr_irq_current(spapr)->qirq(spapr, irq);
}

static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}

static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}

static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}

static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    /* Destroy all KVM devices */
    if (kvm_irqchip_in_kernel()) {
        xics_kvm_disconnect(spapr, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XICS disconnect failed: ");
            return;
        }
        kvmppc_xive_disconnect(spapr->xive, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_prepend(errp, "KVM XIVE disconnect failed: ");
            return;
        }
    }

    spapr_irq_current(spapr)->reset(spapr, errp);
}

static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
}

static const char *spapr_irq_get_nodename_dual(SpaprMachineState *spapr)
{
    return spapr_irq_current(spapr)->get_nodename(spapr);
}

/*
 * Define values in sync with the XIVE and XICS backends
 */
#define SPAPR_IRQ_DUAL_NR_IRQS 0x2000
#define SPAPR_IRQ_DUAL_NR_MSIS (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)

SpaprIrq spapr_irq_dual = {
    .nr_irqs = SPAPR_IRQ_DUAL_NR_IRQS,
    .nr_msis = SPAPR_IRQ_DUAL_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_BOTH,

    .init = spapr_irq_init_dual,
    .claim = spapr_irq_claim_dual,
    .free = spapr_irq_free_dual,
    .qirq = spapr_qirq_dual,
    .print_info = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load = spapr_irq_post_load_dual,
    .reset = spapr_irq_reset_dual,
    .set_irq = spapr_irq_set_irq_dual,
    .get_nodename = spapr_irq_get_nodename_dual,
    .init_kvm = NULL, /* should not be used */
};


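/*
 * Sanity-check the interrupt controller configuration against the CPU
 * model and the KVM capabilities before the backend is created.
 */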
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return;
        }

        /*
         * Using only XIVE on non-P9 machines is a bogus setup. We have
         * two scenarios to take into account because of the compat
         * mode:
         *
         * 1. POWER7/8 machines should fail to init later on when
         *    creating the XIVE interrupt presenters because a POWER9
         *    exception model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail
         *    and will let the OS boot with a partial XIVE setup: DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure
         * in QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU exiting later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return;
    }
}

/*
 * sPAPR IRQ frontend routines for devices
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    spapr_irq_check(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, spapr->irq->nr_irqs, errp);

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_irqs);
}

int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}

void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}

qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}

int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}

void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}

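/*
 * Look up the phandle of the interrupt controller node in the device
 * tree, using the node name provided by the active backend.
 */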
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = spapr->irq->get_nodename(spapr);
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s", nodename,
                   fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}

/*
 * XICS legacy routines - to deprecate one day
 */

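/*
 * Scan the ICS for a block of 'num' consecutive free IRQs whose first
 * number is aligned to 'alignnum'. Returns the source number of the
 * first IRQ in the block, or -1 if none is available.
 */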
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated contiguously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}

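/*
 * Smaller, fixed IRQ number space kept for the older machine types
 * that still use the legacy XICS allocator.
 */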
#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS 0x400

SpaprIrq spapr_irq_xics_legacy = {
    .nr_irqs = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
    .set_irq = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm = spapr_irq_init_kvm_xics,
};