]> git.proxmox.com Git - mirror_qemu.git/blob - hw/ppc/spapr_irq.c
spapr: Handle freeing of multiple irqs in frontend only
[mirror_qemu.git] / hw / ppc / spapr_irq.c
1 /*
2 * QEMU PowerPC sPAPR IRQ interface
3 *
4 * Copyright (c) 2018, IBM Corporation.
5 *
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
8 */
9
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/error-report.h"
13 #include "qapi/error.h"
14 #include "hw/irq.h"
15 #include "hw/ppc/spapr.h"
16 #include "hw/ppc/spapr_cpu_core.h"
17 #include "hw/ppc/spapr_xive.h"
18 #include "hw/ppc/xics.h"
19 #include "hw/ppc/xics_spapr.h"
20 #include "hw/qdev-properties.h"
21 #include "cpu-models.h"
22 #include "sysemu/kvm.h"
23
24 #include "trace.h"
25
/*
 * Set up the bitmap allocator used to hand out MSI IRQ numbers.
 * 'nr_msis' is the size of the dynamic MSI range; all bits start
 * clear (free).
 */
void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}
31
32 int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
33 Error **errp)
34 {
35 int irq;
36
37 /*
38 * The 'align_mask' parameter of bitmap_find_next_zero_area()
39 * should be one less than a power of 2; 0 means no
40 * alignment. Adapt the 'align' value of the former allocator
41 * to fit the requirements of bitmap_find_next_zero_area()
42 */
43 align -= 1;
44
45 irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
46 align);
47 if (irq == spapr->irq_map_nr) {
48 error_setg(errp, "can't find a free %d-IRQ block", num);
49 return -1;
50 }
51
52 bitmap_set(spapr->irq_map, irq, num);
53
54 return irq + SPAPR_IRQ_MSI;
55 }
56
/*
 * Return a block of 'num' MSI IRQs to the allocator. 'irq' is the
 * first global IRQ number of the block as returned by
 * spapr_irq_msi_alloc(), hence the SPAPR_IRQ_MSI rebasing.
 */
void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}
61
/*
 * Try to enable the in-kernel (KVM) variant of the 'irq' backend.
 *
 * - if the KVM irqchip is required and init fails, the error is
 *   propagated to the caller (fatal);
 * - if it is merely allowed and init fails, a warning is printed and
 *   QEMU silently falls back to the emulated backend;
 * - with TCG or kernel-irqchip=off, this is a no-op.
 */
static void spapr_irq_init_kvm(SpaprMachineState *spapr,
                       SpaprIrq *irq, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        irq->init_kvm(spapr, &local_err);
        if (local_err && machine_kernel_irqchip_required(machine)) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            error_propagate(errp, local_err);
            return;
        }

        if (!local_err) {
            /* KVM device successfully created */
            return;
        }

        /*
         * We failed to initialize the KVM device, fallback to
         * emulated mode
         */
        error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
        error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n");
        warn_report_err(local_err);
    }
}
90
91 /*
92 * XICS IRQ backend.
93 */
94
95 static void spapr_irq_init_xics(SpaprMachineState *spapr, Error **errp)
96 {
97 Object *obj;
98 Error *local_err = NULL;
99
100 obj = object_new(TYPE_ICS_SPAPR);
101 object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
102 object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
103 &error_fatal);
104 object_property_set_int(obj, spapr->irq->nr_xirqs,
105 "nr-irqs", &error_fatal);
106 object_property_set_bool(obj, true, "realized", &local_err);
107 if (local_err) {
108 error_propagate(errp, local_err);
109 return;
110 }
111
112 spapr->ics = ICS_SPAPR(obj);
113 }
114
115 static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
116 Error **errp)
117 {
118 ICSState *ics = spapr->ics;
119
120 assert(ics);
121
122 if (!ics_valid_irq(ics, irq)) {
123 error_setg(errp, "IRQ %d is invalid", irq);
124 return -1;
125 }
126
127 if (!ics_irq_free(ics, irq - ics->offset)) {
128 error_setg(errp, "IRQ %d is not free", irq);
129 return -1;
130 }
131
132 ics_set_irq_type(ics, irq - ics->offset, lsi);
133 return 0;
134 }
135
136 static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq)
137 {
138 ICSState *ics = spapr->ics;
139 uint32_t srcno = irq - ics->offset;
140
141 if (ics_valid_irq(ics, irq)) {
142 memset(&ics->irqs[srcno], 0, sizeof(ICSIRQState));
143 }
144 }
145
146 static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
147 {
148 CPUState *cs;
149
150 CPU_FOREACH(cs) {
151 PowerPCCPU *cpu = POWERPC_CPU(cs);
152
153 icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
154 }
155
156 ics_pic_print_info(spapr->ics, mon);
157 }
158
/*
 * Create the XICS presenter (ICP) for one vCPU and store it in the
 * per-CPU state. Called at vCPU realize time, including hotplug.
 */
static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->icp = ICP(obj);
}
175
176 static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
177 {
178 if (!kvm_irqchip_in_kernel()) {
179 CPUState *cs;
180 CPU_FOREACH(cs) {
181 PowerPCCPU *cpu = POWERPC_CPU(cs);
182 icp_resend(spapr_cpu_state(cpu)->icp);
183 }
184 }
185 return 0;
186 }
187
188 static void spapr_irq_set_irq_xics(void *opaque, int irq, int val)
189 {
190 SpaprMachineState *spapr = opaque;
191 uint32_t srcno = irq - spapr->ics->offset;
192
193 ics_set_irq(spapr->ics, srcno, val);
194 }
195
196 static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
197 {
198 Error *local_err = NULL;
199
200 spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
201 if (local_err) {
202 error_propagate(errp, local_err);
203 return;
204 }
205 }
206
207 static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
208 {
209 if (kvm_enabled()) {
210 xics_kvm_connect(spapr, errp);
211 }
212 }
213
/* XICS-only interrupt mode (ic-mode=xics) */
SpaprIrq spapr_irq_xics = {
    .nr_xirqs    = SPAPR_NR_XIRQS,
    .nr_msis     = SPAPR_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};
230
231 /*
232 * XIVE IRQ backend.
233 */
/*
 * Create and realize the emulated XIVE interrupt controller, claim
 * the per-vCPU IPIs and register the XIVE hypercalls.
 */
static void spapr_irq_init_xive(SpaprMachineState *spapr, Error **errp)
{
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    /* Sources cover the IPI range plus the external IRQ range */
    qdev_prop_set_uint32(dev, "nr-irqs",
                         spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}
258
259 static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
260 Error **errp)
261 {
262 if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
263 error_setg(errp, "IRQ %d is invalid", irq);
264 return -1;
265 }
266 return 0;
267 }
268
/* Release 'irq' in the XIVE backend so it can be claimed again */
static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq)
{
    spapr_xive_irq_free(spapr->xive, irq);
}
273
274 static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
275 Monitor *mon)
276 {
277 CPUState *cs;
278
279 CPU_FOREACH(cs) {
280 PowerPCCPU *cpu = POWERPC_CPU(cs);
281
282 xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
283 }
284
285 spapr_xive_pic_print_info(spapr->xive, mon);
286 }
287
/*
 * Create the XIVE thread interrupt context (TCTX) for one vCPU and
 * store it in the per-CPU state. Called at vCPU realize time,
 * including hotplug.
 */
static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);

    /*
     * (TCG) Early setting the OS CAM line for hotplugged CPUs as they
     * don't beneficiate from the reset of the XIVE IRQ backend
     */
    spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
}
309
/* Post-migration fixup: delegate entirely to the XIVE device */
static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}
314
/*
 * Machine reset handler for XIVE: program the OS CAM line of every
 * vCPU (TCG), reconnect the KVM device if configured, then turn the
 * XIVE MMIO regions back on.
 */
static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
{
    CPUState *cs;
    Error *local_err = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}
336
337 static void spapr_irq_set_irq_xive(void *opaque, int irq, int val)
338 {
339 SpaprMachineState *spapr = opaque;
340
341 if (kvm_irqchip_in_kernel()) {
342 kvmppc_xive_source_set_irq(&spapr->xive->source, irq, val);
343 } else {
344 xive_source_set_irq(&spapr->xive->source, irq, val);
345 }
346 }
347
348 static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
349 {
350 if (kvm_enabled()) {
351 kvmppc_xive_connect(spapr->xive, errp);
352 }
353 }
354
/* XIVE-only interrupt mode (ic-mode=xive) */
SpaprIrq spapr_irq_xive = {
    .nr_xirqs    = SPAPR_NR_XIRQS,
    .nr_msis     = SPAPR_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_EXPLOIT,

    .init        = spapr_irq_init_xive,
    .claim       = spapr_irq_claim_xive,
    .free        = spapr_irq_free_xive,
    .print_info  = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load   = spapr_irq_post_load_xive,
    .reset       = spapr_irq_reset_xive,
    .set_irq     = spapr_irq_set_irq_xive,
    .init_kvm    = spapr_irq_init_kvm_xive,
};
371
372 /*
373 * Dual XIVE and XICS IRQ backend.
374 *
375 * Both interrupt mode, XIVE and XICS, objects are created but the
376 * machine starts in legacy interrupt mode (XICS). It can be changed
377 * by the CAS negotiation process and, in that case, the new mode is
378 * activated after an extra machine reset.
379 */
380
381 /*
382 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
383 * default.
384 */
385 static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
386 {
387 return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
388 &spapr_irq_xive : &spapr_irq_xics;
389 }
390
/*
 * Create both the XICS and XIVE backend objects; the guest selects
 * one of them at CAS time.
 */
static void spapr_irq_init_dual(SpaprMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xics.init(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xive.init(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
407
/*
 * Claim the IRQ in both backends so that the guest can switch
 * interrupt mode at CAS time without re-claiming.
 *
 * NOTE(review): if the XIVE claim fails after the XICS claim
 * succeeded, the XICS side is not rolled back — confirm callers
 * treat a failed claim as fatal.
 */
static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}
428
/* Free in both backends, mirroring spapr_irq_claim_dual() */
static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq)
{
    spapr_irq_xics.free(spapr, irq);
    spapr_irq_xive.free(spapr, irq);
}
434
/* Only show the state of the mode negotiated by CAS */
static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}
439
/* Populate the device tree for the mode negotiated by CAS */
static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}
446
/*
 * Create both presenters (TCTX and ICP) for a vCPU, since the guest
 * can end up in either mode after CAS.
 */
static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Last call may pass errp straight through */
    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}
460
static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        /* The XICS KVM device was created at startup; drop it first */
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}
476
477 static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
478 {
479 Error *local_err = NULL;
480
481 /*
482 * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
483 * if selected.
484 */
485 spapr_xive_mmio_set_enabled(spapr->xive, false);
486
487 /* Destroy all KVM devices */
488 if (kvm_irqchip_in_kernel()) {
489 xics_kvm_disconnect(spapr, &local_err);
490 if (local_err) {
491 error_propagate(errp, local_err);
492 error_prepend(errp, "KVM XICS disconnect failed: ");
493 return;
494 }
495 kvmppc_xive_disconnect(spapr->xive, &local_err);
496 if (local_err) {
497 error_propagate(errp, local_err);
498 error_prepend(errp, "KVM XIVE disconnect failed: ");
499 return;
500 }
501 }
502
503 spapr_irq_current(spapr)->reset(spapr, errp);
504 }
505
/* qemu_irq handler: forward to the mode negotiated by CAS */
static void spapr_irq_set_irq_dual(void *opaque, int irq, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, irq, val);
}
512
/*
 * Define values in sync with the XIVE and XICS backend
 */
/* Dual interrupt mode (ic-mode=dual): mode chosen by the guest at CAS */
SpaprIrq spapr_irq_dual = {
    .nr_xirqs    = SPAPR_NR_XIRQS,
    .nr_msis     = SPAPR_NR_MSIS,
    .ov5         = SPAPR_OV5_XIVE_BOTH,

    .init        = spapr_irq_init_dual,
    .claim       = spapr_irq_claim_dual,
    .free        = spapr_irq_free_dual,
    .print_info  = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load   = spapr_irq_post_load_dual,
    .reset       = spapr_irq_reset_dual,
    .set_irq     = spapr_irq_set_irq_dual,
    .init_kvm    = NULL, /* should not be used */
};
532
533
/*
 * Validate the machine/accelerator configuration against the selected
 * interrupt mode before any backend object is created.
 */
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return;
        }

        /*
         * Non-P9 machines using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup : DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU to exit later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return;
    }
}
587
588 /*
589 * sPAPR IRQ frontend routines for devices
590 */
/*
 * Machine-level IRQ subsystem init: validate the configuration, set
 * up the MSI allocator, create the backend object(s) and allocate the
 * qemu_irq array that devices use to raise/lower interrupt lines.
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    spapr_irq_check(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    /* NOTE(review): errp is not checked before qemu_allocate_irqs()
     * below — presumably the caller bails out on error; confirm */
    spapr->irq->init(spapr, errp);

    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE);
}
623
/* Frontend: claim one IRQ through the active backend */
int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}
628
629 void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
630 {
631 int i;
632
633 for (i = irq; i < (irq + num); i++) {
634 spapr->irq->free(spapr, i);
635 }
636 }
637
/*
 * Return the qemu_irq for a claimed external IRQ number.
 */
qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    /*
     * This interface is basically for VIO and PHB devices to find the
     * right qemu_irq to manipulate, so we only allow access to the
     * external irqs for now. Currently anything which needs to
     * access the IPIs most naturally gets there via the guest side
     * interfaces, we can change this if we need to in future.
     */
    assert(irq >= SPAPR_XIRQ_BASE);
    assert(irq < (spapr->irq->nr_xirqs + SPAPR_XIRQ_BASE));

    /* Cross-check against whichever backend objects exist */
    if (spapr->ics) {
        assert(ics_valid_irq(spapr->ics, irq));
    }
    if (spapr->xive) {
        assert(irq < spapr->xive->nr_irqs);
        assert(xive_eas_is_valid(&spapr->xive->eat[irq]));
    }

    return spapr->qirqs[irq];
}
660
/* Frontend: post-migration fixup through the active backend */
int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}
665
/* Frontend: machine reset hook for the IRQ subsystem */
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    /* All dynamically allocated MSIs must have been released by now */
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    /* Not all backends have a reset handler */
    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}
674
/*
 * Look up the phandle of the "interrupt-controller" node at the root
 * of 'fdt'. Returns the phandle, or -1 (with errp set) when the node
 * or its phandle is missing.
 */
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = "interrupt-controller";
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s",
                   nodename, fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}
695
696 /*
697 * XICS legacy routines - to deprecate one day
698 */
699
700 static int ics_find_free_block(ICSState *ics, int num, int alignnum)
701 {
702 int first, i;
703
704 for (first = 0; first < ics->nr_irqs; first += alignnum) {
705 if (num > (ics->nr_irqs - first)) {
706 return -1;
707 }
708 for (i = first; i < first + num; ++i) {
709 if (!ics_irq_free(ics, i)) {
710 break;
711 }
712 }
713 if (i == (first + num)) {
714 return first;
715 }
716 }
717
718 return -1;
719 }
720
/*
 * Legacy allocator: find (but do not claim) a block of 'num' free
 * IRQs in the ICS, aligned on 'num' when 'align' is set. Returns the
 * first global IRQ number, or -1 (with errp set) on failure.
 */
int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMesage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    /* Convert the ICS source number back to a global IRQ number */
    return first + ics->offset;
}
750
#define SPAPR_IRQ_XICS_LEGACY_NR_XIRQS     0x400

/* Legacy XICS mode for older machine types (larger fixed IRQ range) */
SpaprIrq spapr_irq_xics_legacy = {
    .nr_xirqs    = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .nr_msis     = SPAPR_IRQ_XICS_LEGACY_NR_XIRQS,
    .ov5         = SPAPR_OV5_XIVE_LEGACY,

    .init        = spapr_irq_init_xics,
    .claim       = spapr_irq_claim_xics,
    .free        = spapr_irq_free_xics,
    .print_info  = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load   = spapr_irq_post_load_xics,
    .reset       = spapr_irq_reset_xics,
    .set_irq     = spapr_irq_set_irq_xics,
    .init_kvm    = spapr_irq_init_kvm_xics,
};