/*
 * QEMU PowerPC sPAPR IRQ interface
 *
 * Copyright (c) 2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xics_spapr.h"
#include "sysemu/kvm.h"

#include "trace.h"

void spapr_irq_msi_init(sPAPRMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}

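/*
 * Allocate a block of 'num' consecutive IRQ numbers from the MSI
 * bitmap, honouring the 'align' hint, and return the first IRQ
 * number of the block (offset by SPAPR_IRQ_MSI), or -1 with 'errp'
 * set when no block is available.
 */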
int spapr_irq_msi_alloc(sPAPRMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    align -= 1;

    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}

void spapr_irq_msi_free(sPAPRMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}

void spapr_irq_msi_reset(sPAPRMachineState *spapr)
{
    bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr);
}


/*
 * XICS IRQ backend.
 */

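/*
 * Create and realize an interrupt source controller of the requested
 * QOM type ('type_ics') as a child of the machine, with 'nr_irqs'
 * interrupt sources.
 */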
static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
                                  const char *type_ics,
                                  int nr_irqs, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(type_ics);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_abort);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
    if (local_err) {
        goto error;
    }
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return ICS_BASE(obj);

error:
    error_propagate(errp, local_err);
    return NULL;
}

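/*
 * Under KVM, try the in-kernel XICS (TYPE_ICS_KVM) first when a
 * kernel irqchip is allowed; otherwise, or on failure, fall back to
 * the emulated TYPE_ICS_SIMPLE backend, unless kernel_irqchip=on was
 * required.
 */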
static void spapr_irq_init_xics(sPAPRMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled()) {
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, &local_err)) {
            spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs,
                                          &local_err);
        }
        if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            goto error;
        }
        error_free(local_err);
        local_err = NULL;
    }

    if (!spapr->ics) {
        xics_spapr_init(spapr);
        spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs,
                                      &local_err);
    }

error:
    error_propagate(errp, local_err);
}

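/* A source is free when no IRQ type flag (LSI/MSI) has been claimed for it */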
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int spapr_irq_claim_xics(sPAPRMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    ICSState *ics = spapr->ics;

    assert(ics);

    if (!ics_valid_irq(ics, irq)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }

    if (!ICS_IRQ_FREE(ics, irq - ics->offset)) {
        error_setg(errp, "IRQ %d is not free", irq);
        return -1;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return 0;
}

static void spapr_irq_free_xics(sPAPRMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ICS_IRQ_FREE(ics, i)) {
                trace_spapr_irq_free_warn(0, i);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}

static qemu_irq spapr_qirq_xics(sPAPRMachineState *spapr, int irq)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;

    if (ics_valid_irq(ics, irq)) {
        return spapr->qirqs[srcno];
    }

    return NULL;
}

static void spapr_irq_print_info_xics(sPAPRMachineState *spapr, Monitor *mon)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}

static void spapr_irq_cpu_intc_create_xics(sPAPRMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);

    obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->icp = ICP(obj);
}

static int spapr_irq_post_load_xics(sPAPRMachineState *spapr, int version_id)
{
    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(spapr_cpu_state(cpu)->icp);
        }
    }
    return 0;
}

static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
{
    sPAPRMachineState *spapr = opaque;

    ics_simple_set_irq(spapr->ics, srcno, val);
}

static void spapr_irq_reset_xics(sPAPRMachineState *spapr, Error **errp)
{
    /* TODO: create the KVM XICS device */
}

#define SPAPR_IRQ_XICS_NR_IRQS 0x1000
#define SPAPR_IRQ_XICS_NR_MSIS \
    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_xics = {
    .nr_irqs = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
    .set_irq = spapr_irq_set_irq_xics,
};

/*
 * XIVE IRQ backend.
 */
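/*
 * Create the sPAPR XIVE device with 'nr_irqs' interrupt sources and
 * eight event queue ends per server (one per priority), claim an IPI
 * for each vCPU and register the XIVE hypercalls. There is no KVM
 * support yet, so kernel_irqchip=on is rejected.
 */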
static void spapr_irq_init_xive(sPAPRMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    /* KVM XIVE device not yet available */
    if (kvm_enabled()) {
        if (machine_kernel_irqchip_required(machine)) {
            error_setg(errp, "kernel_irqchip requested. no KVM XIVE support");
            return;
        }
    }

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}

291
292static int spapr_irq_claim_xive(sPAPRMachineState *spapr, int irq, bool lsi,
293 Error **errp)
294{
295 if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
296 error_setg(errp, "IRQ %d is invalid", irq);
297 return -1;
298 }
299 return 0;
300}
301
302static void spapr_irq_free_xive(sPAPRMachineState *spapr, int irq, int num)
303{
304 int i;
305
306 for (i = irq; i < irq + num; ++i) {
307 spapr_xive_irq_free(spapr->xive, i);
308 }
309}
310
311static qemu_irq spapr_qirq_xive(sPAPRMachineState *spapr, int irq)
312{
a0c493ae 313 sPAPRXive *xive = spapr->xive;
a0c493ae
CLG
314
315 if (irq >= xive->nr_irqs) {
316 return NULL;
317 }
318
319 /* The sPAPR machine/device should have claimed the IRQ before */
320 assert(xive_eas_is_valid(&xive->eat[irq]));
321
872ff3de 322 return spapr->qirqs[irq];
dcc345b6
CLG
323}
324
325static void spapr_irq_print_info_xive(sPAPRMachineState *spapr,
326 Monitor *mon)
327{
328 CPUState *cs;
329
330 CPU_FOREACH(cs) {
331 PowerPCCPU *cpu = POWERPC_CPU(cs);
332
a28b9a5a 333 xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
dcc345b6
CLG
334 }
335
336 spapr_xive_pic_print_info(spapr->xive, mon);
337}
338
static void spapr_irq_cpu_intc_create_xive(sPAPRMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    sPAPRCPUState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);

    /*
     * (TCG) Set the OS CAM line early for hotplugged CPUs as they
     * don't benefit from the reset of the XIVE IRQ backend
     */
    spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
}

static int spapr_irq_post_load_xive(sPAPRMachineState *spapr, int version_id)
{
    return 0;
}

static void spapr_irq_reset_xive(sPAPRMachineState *spapr, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}

static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
{
    sPAPRMachineState *spapr = opaque;

    xive_source_set_irq(&spapr->xive->source, srcno, val);
}

/*
 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 * with XICS.
 */

#define SPAPR_IRQ_XIVE_NR_IRQS 0x2000
#define SPAPR_IRQ_XIVE_NR_MSIS (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_xive = {
    .nr_irqs = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_EXPLOIT,

    .init = spapr_irq_init_xive,
    .claim = spapr_irq_claim_xive,
    .free = spapr_irq_free_xive,
    .qirq = spapr_qirq_xive,
    .print_info = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load = spapr_irq_post_load_xive,
    .reset = spapr_irq_reset_xive,
    .set_irq = spapr_irq_set_irq_xive,
};

/*
 * Dual XIVE and XICS IRQ backend.
 *
 * Both interrupt mode objects, XIVE and XICS, are created, but the
 * machine starts in legacy interrupt mode (XICS). The mode can be
 * changed by the CAS negotiation process and, in that case, the new
 * mode is activated after an extra machine reset.
 */

/*
 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
 * default.
 */
static sPAPRIrq *spapr_irq_current(sPAPRMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
        &spapr_irq_xive : &spapr_irq_xics;
}

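/*
 * Both the XICS and XIVE objects are initialized, and device IRQs are
 * claimed in both backends (see spapr_irq_claim_dual() below), so that
 * whichever mode CAS selects already has the IRQs it needs.
 */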
static void spapr_irq_init_dual(sPAPRMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        error_setg(errp, "No KVM support for the 'dual' machine");
        return;
    }

    spapr_irq_xics.init(spapr, spapr_irq_xics.nr_irqs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xive.init(spapr, spapr_irq_xive.nr_irqs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static int spapr_irq_claim_dual(sPAPRMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}

static void spapr_irq_free_dual(sPAPRMachineState *spapr, int irq, int num)
{
    spapr_irq_xics.free(spapr, irq, num);
    spapr_irq_xive.free(spapr, irq, num);
}

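/*
 * qirq, print_info, dt_populate and set_irq are routed to whichever
 * backend was negotiated by CAS, see spapr_irq_current().
 */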
static qemu_irq spapr_qirq_dual(sPAPRMachineState *spapr, int irq)
{
    return spapr_irq_current(spapr)->qirq(spapr, irq);
}

static void spapr_irq_print_info_dual(sPAPRMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}

static void spapr_irq_dt_populate_dual(sPAPRMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}

static void spapr_irq_cpu_intc_create_dual(sPAPRMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}

static int spapr_irq_post_load_dual(sPAPRMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}

static void spapr_irq_reset_dual(sPAPRMachineState *spapr, Error **errp)
{
    /*
     * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
     * if selected.
     */
    spapr_xive_mmio_set_enabled(spapr->xive, false);

    spapr_irq_current(spapr)->reset(spapr, errp);
}

static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
{
    sPAPRMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
}

/*
 * Define values in sync with the XIVE and XICS backends
 */
#define SPAPR_IRQ_DUAL_NR_IRQS 0x2000
#define SPAPR_IRQ_DUAL_NR_MSIS (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)

sPAPRIrq spapr_irq_dual = {
    .nr_irqs = SPAPR_IRQ_DUAL_NR_IRQS,
    .nr_msis = SPAPR_IRQ_DUAL_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_BOTH,

    .init = spapr_irq_init_dual,
    .claim = spapr_irq_claim_dual,
    .free = spapr_irq_free_dual,
    .qirq = spapr_qirq_dual,
    .print_info = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load = spapr_irq_post_load_dual,
    .reset = spapr_irq_reset_dual,
    .set_irq = spapr_irq_set_irq_dual
};

/*
 * sPAPR IRQ frontend routines for devices
 */
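/*
 * Check the kernel irqchip constraints, set up the MSI IRQ number
 * allocator unless the machine uses the legacy allocation scheme,
 * initialize the selected backend and allocate the qemu_irq array
 * wired to its set_irq handler.
 */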
void spapr_irq_init(sPAPRMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, spapr->irq->nr_irqs, errp);

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_irqs);
}

int spapr_irq_claim(sPAPRMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}

void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}

qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}

int spapr_irq_post_load(sPAPRMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}

void spapr_irq_reset(sPAPRMachineState *spapr, Error **errp)
{
    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}

/*
 * XICS legacy routines - to deprecate one day
 */

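/*
 * Scan the ICS for 'num' consecutive free sources starting at a
 * multiple of 'alignnum'; return the first source number, or -1 if no
 * such block exists.
 */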
static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}

int spapr_irq_find(sPAPRMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ, so it has to be
     * aligned to num to support multiple MSI vectors. MSI-X is not
     * affected by this.
     * The hint is used for the first IRQ, the rest should be
     * allocated contiguously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    return first + ics->offset;
}

#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS 0x400

sPAPRIrq spapr_irq_xics_legacy = {
    .nr_irqs = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .set_irq = spapr_irq_set_irq_xics,
};