/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/xics.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qapi/visitor.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/intc/intc.h"
#include "hw/irq.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"

void icp_pic_print_info(ICPState *icp, Monitor *mon)
{
    int cpu_index;

    /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
     * are hot plugged or unplugged.
     */
    if (!icp) {
        return;
    }

    cpu_index = icp->cs ? icp->cs->cpu_index : -1;

    if (!icp->output) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        icp_synchronize_state(icp);
    }

    monitor_printf(mon, "CPU %d XIRR=%08x (%p) PP=%02x MFRR=%02x\n",
                   cpu_index, icp->xirr, icp->xirr_owner,
                   icp->pending_priority, icp->mfrr);
}

void ics_pic_print_info(ICSState *ics, Monitor *mon)
{
    uint32_t i;

    monitor_printf(mon, "ICS %4x..%4x %p\n",
                   ics->offset, ics->offset + ics->nr_irqs - 1, ics);

    if (!ics->irqs) {
        return;
    }

    if (kvm_irqchip_in_kernel()) {
        ics_synchronize_state(ics);
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        ICSIRQState *irq = ics->irqs + i;

        if (!(irq->flags & XICS_FLAGS_IRQ_MASK)) {
            continue;
        }
        monitor_printf(mon, "  %4x %s %02x %02x\n",
                       ics->offset + i,
                       (irq->flags & XICS_FLAGS_IRQ_LSI) ?
                       "LSI" : "MSI",
                       irq->priority, irq->status);
    }
}

/*
 * ICP: Presentation layer
 */

#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

#define XISR(icp)   (((icp)->xirr) & XISR_MASK)
#define CPPR(icp)   (((icp)->xirr) >> 24)
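
/*
 * The XIRR is a single 32-bit register combining the 8-bit CPPR (bits
 * 31..24) with the 24-bit XISR source number (bits 23..0). As a worked
 * example of the macros above, xirr == 0x05001234 gives CPPR(icp) == 0x05
 * and XISR(icp) == 0x001234.
 */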

static void ics_reject(ICSState *ics, uint32_t nr);
static void ics_eoi(ICSState *ics, uint32_t nr);

static void icp_check_ipi(ICPState *icp)
{
    if (XISR(icp) && (icp->pending_priority <= icp->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(icp->cs->cpu_index, icp->mfrr);

    if (XISR(icp) && icp->xirr_owner) {
        ics_reject(icp->xirr_owner, XISR(icp));
    }

    icp->xirr = (icp->xirr & ~XISR_MASK) | XICS_IPI;
    icp->pending_priority = icp->mfrr;
    icp->xirr_owner = NULL;
    qemu_irq_raise(icp->output);
}

void icp_resend(ICPState *icp)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    if (icp->mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }

    xic->ics_resend(xi);
}

void icp_set_cppr(ICPState *icp, uint8_t cppr)
{
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(icp);
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(icp) && (cppr <= icp->pending_priority)) {
            old_xisr = XISR(icp);
            icp->xirr &= ~XISR_MASK; /* Clear XISR */
            icp->pending_priority = 0xff;
            qemu_irq_lower(icp->output);
            if (icp->xirr_owner) {
                ics_reject(icp->xirr_owner, old_xisr);
                icp->xirr_owner = NULL;
            }
        }
    } else {
        if (!XISR(icp)) {
            icp_resend(icp);
        }
    }
}
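
/*
 * CPPR semantics (a smaller value is more favored): moving the CPPR to a
 * value at least as favored as the pending interrupt's priority causes that
 * interrupt to be rejected back to its source ICS, while making the CPPR
 * less favored with nothing presented triggers a resend so that previously
 * rejected MSIs and still-asserted LSIs get another chance at delivery.
 */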

void icp_set_mfrr(ICPState *icp, uint8_t mfrr)
{
    icp->mfrr = mfrr;
    if (mfrr < CPPR(icp)) {
        icp_check_ipi(icp);
    }
}
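
/*
 * The MFRR doubles as the IPI trigger: writing a value more favored than
 * the current CPPR immediately presents the reserved XICS_IPI source at
 * that priority. On sPAPR this path is typically reached from the H_IPI
 * hypercall.
 */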

uint32_t icp_accept(ICPState *icp)
{
    uint32_t xirr = icp->xirr;

    qemu_irq_lower(icp->output);
    icp->xirr = icp->pending_priority << 24;
    icp->pending_priority = 0xff;
    icp->xirr_owner = NULL;

    trace_xics_icp_accept(xirr, icp->xirr);

    return xirr;
}

uint32_t icp_ipoll(ICPState *icp, uint32_t *mfrr)
{
    if (mfrr) {
        *mfrr = icp->mfrr;
    }
    return icp->xirr;
}

void icp_eoi(ICPState *icp, uint32_t xirr)
{
    XICSFabric *xi = icp->xics;
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
    ICSState *ics;
    uint32_t irq;

    /* Send EOI -> ICS */
    icp->xirr = (icp->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(icp->cs->cpu_index, xirr, icp->xirr);
    irq = xirr & XISR_MASK;

    ics = xic->ics_get(xi, irq);
    if (ics) {
        ics_eoi(ics, irq);
    }
    if (!XISR(icp)) {
        icp_resend(icp);
    }
}
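
/*
 * The XIRR value written on EOI carries the restored CPPR in its top byte:
 * the store above puts the pre-interrupt priority back, the ICS owning the
 * source gets to clear its per-source EOI state (ics_eoi), and any still
 * pending sources are then re-presented via icp_resend() if nothing is
 * left in the XISR.
 */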

void icp_irq(ICSState *ics, int server, int nr, uint8_t priority)
{
    ICPState *icp = xics_icp_get(ics->xics, server);

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(icp))
        || (XISR(icp) && (icp->pending_priority <= priority))) {
        ics_reject(ics, nr);
    } else {
        if (XISR(icp) && icp->xirr_owner) {
            ics_reject(icp->xirr_owner, XISR(icp));
            icp->xirr_owner = NULL;
        }
        icp->xirr = (icp->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        icp->xirr_owner = ics;
        icp->pending_priority = priority;
        trace_xics_icp_raise(icp->xirr, icp->pending_priority);
        qemu_irq_raise(icp->output);
    }
}
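
/*
 * icp_irq() implements the XICS delivery rule: an incoming interrupt is
 * rejected when it is not more favored than the CPPR, or when an equally
 * or more favored interrupt is already being presented; otherwise it
 * displaces whatever was presented (rejecting that back to its ICS) and
 * raises the output line to the vCPU.
 */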

static int icp_pre_save(void *opaque)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        icp_get_kvm_state(icp);
    }

    return 0;
}

static int icp_post_load(void *opaque, int version_id)
{
    ICPState *icp = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = icp_set_kvm_state(icp, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = icp_pre_save,
    .post_load = icp_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

void icp_reset(ICPState *icp)
{
    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        icp_set_kvm_state(icp, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}
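
/*
 * After reset the presenter effectively masks everything: XIRR == 0 means
 * CPPR == 0, the most favored value, so no external interrupt or IPI can
 * be presented until the guest moves the CPPR to a less favored value,
 * and MFRR == 0xff means no IPI is requested.
 */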

static void icp_realize(DeviceState *dev, Error **errp)
{
    ICPState *icp = ICP(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Error *err = NULL;

    assert(icp->xics);
    assert(icp->cs);

    cpu = POWERPC_CPU(icp->cs);
    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER7_INPUT_INT);
        break;
    case PPC_FLAGS_INPUT_POWER9: /* For SPAPR xics emulation */
        icp->output = qdev_get_gpio_in(DEVICE(cpu), POWER9_INPUT_INT);
        break;

    case PPC_FLAGS_INPUT_970:
        icp->output = qdev_get_gpio_in(DEVICE(cpu), PPC970_INPUT_INT);
        break;

    default:
        error_setg(errp, "XICS interrupt controller does not support this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        icp_kvm_realize(dev, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }

    vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp);
}

static void icp_unrealize(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    vmstate_unregister(NULL, &vmstate_icp_server, icp);
}

static Property icp_properties[] = {
    DEFINE_PROP_LINK(ICP_PROP_XICS, ICPState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
    DEFINE_PROP_LINK(ICP_PROP_CPU, ICPState, cs, TYPE_CPU, CPUState *),
    DEFINE_PROP_END_OF_LIST(),
};

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = icp_realize;
    dc->unrealize = icp_unrealize;
    device_class_set_props(dc, icp_properties);
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up
     * by icp_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp)
{
    Object *obj;

    obj = object_new(type);
    object_property_add_child(cpu, type, obj);
    object_unref(obj);
    object_property_set_link(obj, ICP_PROP_XICS, OBJECT(xi), &error_abort);
    object_property_set_link(obj, ICP_PROP_CPU, cpu, &error_abort);
    if (!qdev_realize(DEVICE(obj), NULL, errp)) {
        object_unparent(obj);
        obj = NULL;
    }

    return obj;
}
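
/*
 * Illustrative use (not the only caller pattern): a machine with an
 * XICSFabric "xi" would create one presenter per vCPU roughly as
 *
 *     Object *obj = icp_create(OBJECT(cpu), TYPE_ICP, xi, &error_fatal);
 *
 * and later tear it down with icp_destroy(ICP(obj)) on unplug. The type
 * argument allows machines to instantiate an ICP subclass instead of the
 * base TYPE_ICP.
 */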

void icp_destroy(ICPState *icp)
{
    Object *obj = OBJECT(icp);

    object_unparent(obj);
}

/*
 * ICS: Source layer
 */
static void ics_resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void ics_resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void ics_set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void ics_set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    ics_resend_lsi(ics, srcno);
}

void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_kvm_set_irq(ics, srcno, val);
        return;
    }

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_set_irq_lsi(ics, srcno, val);
    } else {
        ics_set_irq_msi(ics, srcno, val);
    }
}
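
/*
 * ics_set_irq() has the qemu_irq_handler signature, so board code can (for
 * example) expose the sources as input lines with
 *
 *     qemu_irq *lines = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
 *
 * and assert source "srcno" via qemu_set_irq(lines[srcno], 1). This is a
 * sketch of one possible wiring, not a description of every caller.
 */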

static void ics_write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics, irq->server, srcno + ics->offset, irq->priority);
}

static void ics_write_xive_lsi(ICSState *ics, int srcno)
{
    ics_resend_lsi(ics, srcno);
}

void ics_write_xive(ICSState *ics, int srcno, int server,
                    uint8_t priority, uint8_t saved_priority)
{
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(ics->offset + srcno, srcno, server, priority);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        ics_write_xive_lsi(ics, srcno);
    } else {
        ics_write_xive_msi(ics, srcno);
    }
}
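
/*
 * ics_write_xive() is how a source's guest-visible (server, priority) pair
 * gets programmed; on sPAPR it is typically reached from the ibm,set-xive
 * and ibm,int-on / ibm,int-off RTAS calls. Note that unmasking an MSI whose
 * delivery is still latched in MASKED_PENDING sends it immediately.
 */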

static void ics_reject(ICSState *ics, uint32_t nr)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    if (isc->reject) {
        isc->reject(ics, nr);
        return;
    }

    trace_xics_ics_reject(nr, nr - ics->offset);
    if (irq->flags & XICS_FLAGS_IRQ_MSI) {
        irq->status |= XICS_STATUS_REJECTED;
    } else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

void ics_resend(ICSState *ics)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);
    int i;

    if (isc->resend) {
        isc->resend(ics);
        return;
    }

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) {
            ics_resend_lsi(ics, i);
        } else {
            ics_resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, uint32_t nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_LSI) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset_irq(ICSIRQState *irq)
{
    irq->priority = 0xff;
    irq->saved_priority = 0xff;
}

static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    g_autofree uint8_t *flags = g_malloc(ics->nr_irqs);
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        flags[i] = ics->irqs[i].flags;
    }

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);

    for (i = 0; i < ics->nr_irqs; i++) {
        ics_reset_irq(ics->irqs + i);
        ics->irqs[i].flags = flags[i];
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_set_kvm_state(ICS(dev), &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}

static void ics_reset_handler(void *dev)
{
    device_cold_reset(dev);
}
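
/*
 * TYPE_ICS devices have no bus parent, so they are not reached by the qdev
 * reset tree. Registering this handler with qemu_register_reset() (in
 * ics_realize below) makes sure a machine reset still performs a full
 * device_cold_reset(), and hence ics_reset(), for every ICS instance.
 */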

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    assert(ics->xics);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }
    ics->irqs = g_new0(ICSIRQState, ics->nr_irqs);

    qemu_register_reset(ics_reset_handler, ics);
}

static void ics_instance_init(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}
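
/*
 * The default source offset, XICS_IRQ_BASE, keeps ICS source numbers clear
 * of the low global IRQ numbers, in particular the reserved XICS_IPI number
 * that icp_check_ipi() presents directly.
 */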

static int ics_pre_save(void *opaque)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        ics_get_kvm_state(ics);
    }

    return 0;
}

static int ics_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;
        int ret;

        ret = ics_set_kvm_state(ics, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_UINT8(flags, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ics_pre_save,
    .post_load = ics_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL),

        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq,
                                             ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static Property ics_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", ICSState, nr_irqs, 0),
    DEFINE_PROP_LINK(ICS_PROP_XICS, ICSState, xics, TYPE_XICS_FABRIC,
                     XICSFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = ics_realize;
    device_class_set_props(dc, ics_properties);
    dc->reset = ics_reset;
    dc->vmsd = &vmstate_ics;
    /*
     * Reason: part of XICS interrupt controller, needs to be wired up,
     * e.g. by spapr_irq_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .instance_init = ics_instance_init,
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
};

static const TypeInfo xics_fabric_info = {
    .name = TYPE_XICS_FABRIC,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XICSFabricClass),
};

/*
 * Exported functions
 */
ICPState *xics_icp_get(XICSFabric *xi, int server)
{
    XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);

    return xic->icp_get(xi, server);
}

void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
    assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));

    ics->irqs[srcno].flags |=
        lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        ics_reset_irq(ics->irqs + srcno);
        ics_set_kvm_state_one(ics, srcno, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
}
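
/*
 * Machine code is expected to call ics_set_irq_type() exactly once per
 * claimed source, before the source is first used: the assertion above
 * forbids changing a source between LSI and MSI after the fact. Passing
 * lsi=false marks the source as MSI (edge), lsi=true as LSI (level).
 */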

static void xics_register_types(void)
{
    type_register_static(&ics_info);
    type_register_static(&icp_info);
    type_register_static(&xics_fabric_info);
}

type_init(xics_register_types)