/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "monitor/monitor.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Virtualization Controller BAR and Thread Management BAR that we
 * use for the ESB pages and the TIMA pages
 */
#define SPAPR_XIVE_VC_BASE   0x0006010000000000ull
#define SPAPR_XIVE_TM_BASE   0x0006030203180000ull

/*
 * The allocation of VP blocks is a complex operation in OPAL and the
 * VP identifiers have a relation with the number of HW chips, the
 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
 * controller model does not have the same constraints and can use a
 * simple mapping scheme of the CPU vcpu_id.
 *
 * These identifiers are never returned to the OS.
 */

#define SPAPR_XIVE_NVT_BASE 0x400
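
/*
 * For illustration, with the mapping above: the vCPU with vcpu_id 3
 * uses NVT index 0x400 + 3 = 0x403 in block SPAPR_XIVE_BLOCK_ID, and
 * spapr_xive_nvt_to_target() inverts this: 0x403 - 0x400 = 3.
 */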

/*
 * sPAPR NVT and END indexing helpers
 */
static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
{
    return nvt_idx - SPAPR_XIVE_NVT_BASE;
}

static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
                                  uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    assert(cpu);

    if (out_nvt_blk) {
        *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_nvt_idx) {
        *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
    }
}

static int spapr_xive_target_to_nvt(uint32_t target,
                                    uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
    return 0;
}

/*
 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
 * priorities per CPU
 */
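/*
 * Worked example of this mapping: vcpu_id 2 at priority 5 uses END
 * index (2 << 3) + 5 = 21; conversely, END index 21 decodes to
 * server 21 >> 3 = 2 and priority 21 & 0x7 = 5.
 */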
int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
                             uint32_t *out_server, uint8_t *out_prio)
{
    assert(end_blk == SPAPR_XIVE_BLOCK_ID);

    if (out_server) {
        *out_server = end_idx >> 3;
    }

    if (out_prio) {
        *out_prio = end_idx & 0x7;
    }
    return 0;
}

static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    assert(cpu);

    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }

    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}

static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
                                    uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    PowerPCCPU *cpu = spapr_find_cpu(target);

    if (!cpu) {
        return -1;
    }

    spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
    return 0;
}

/*
 * On sPAPR machines, use a simplified output for the XIVE END
 * structure dumping only the information related to the OS EQ.
 */
static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
                                          Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);
    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
                   spapr_xive_nvt_to_target(0, nvt),
                   priority, qindex, qentries, qaddr_base, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]");
}

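/*
 * For illustration, a line of the resulting 'info pic' output could
 * look like the following (column spacing approximate, values made
 * up for the example):
 *
 *   LISN         PQ    EISN CPU/PRIO EQ
 *   00001000 MSI -Q    00000010   0/6      0/16384 @7ff01000 ^1 [ ... ]
 */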
void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
{
    XiveSource *xsrc = &xive->source;
    int i;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_synchronize_state(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "  LISN         PQ    EISN CPU/PRIO EQ\n");

    for (i = 0; i < xive->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);
        XiveEAS *eas = &xive->eat[i];

        if (!xive_eas_is_valid(eas)) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c %s %08x ", i,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
                       xive_eas_is_masked(eas) ? "M" : " ",
                       (int) xive_get_field64(EAS_END_DATA, eas->w));

        if (!xive_eas_is_masked(eas)) {
            uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
            XiveEND *end;

            assert(end_idx < xive->nr_ends);
            end = &xive->endt[end_idx];

            if (xive_end_is_valid(end)) {
                spapr_xive_end_pic_print_info(xive, end, mon);
            }
        }
        monitor_printf(mon, "\n");
    }
}

void spapr_xive_map_mmio(SpaprXive *xive)
{
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
    sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
}

void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
{
    memory_region_set_enabled(&xive->source.esb_mmio, enable);
    memory_region_set_enabled(&xive->tm_mmio, enable);

    /* Disable the END ESBs until a guest OS makes use of them */
    memory_region_set_enabled(&xive->end_source.esb_mmio, false);
}

/*
 * When a Virtual Processor is scheduled to run on a HW thread, the
 * hypervisor pushes its identifier in the OS CAM line. Emulate the
 * same behavior under QEMU.
 */
void spapr_xive_set_tctx_os_cam(XiveTCTX *tctx)
{
    uint8_t  nvt_blk;
    uint32_t nvt_idx;
    uint32_t nvt_cam;

    spapr_xive_cpu_to_nvt(POWERPC_CPU(tctx->cs), &nvt_blk, &nvt_idx);

    nvt_cam = cpu_to_be32(TM_QW1W2_VO | xive_nvt_cam_line(nvt_blk, nvt_idx));
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &nvt_cam, 4);
}

static void spapr_xive_end_reset(XiveEND *end)
{
    memset(end, 0, sizeof(*end));

    /* switch off the escalation and notification ESBs */
    end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
}

static void spapr_xive_reset(void *dev)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    int i;

    /*
     * The XiveSource has its own reset handler, which masks off all
     * IRQs (!P|Q)
     */

    /* Mask all valid EASs in the IRQ number space. */
    for (i = 0; i < xive->nr_irqs; i++) {
        XiveEAS *eas = &xive->eat[i];
        if (xive_eas_is_valid(eas)) {
            eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        } else {
            eas->w = 0;
        }
    }

    /* Clear all ENDs */
    for (i = 0; i < xive->nr_ends; i++) {
        spapr_xive_end_reset(&xive->endt[i]);
    }
}

static void spapr_xive_instance_init(Object *obj)
{
    SpaprXive *xive = SPAPR_XIVE(obj);

    object_initialize_child(obj, "source", &xive->source, sizeof(xive->source),
                            TYPE_XIVE_SOURCE, &error_abort, NULL);

    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);

    /* Not connected to the KVM XIVE device */
    xive->fd = -1;
}

static void spapr_xive_realize(DeviceState *dev, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(dev);
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    if (!xive->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (!xive->nr_ends) {
        error_setg(errp, "Number of interrupt ENDs needs to be greater than 0");
        return;
    }

    /*
     * Initialize the internal sources, for IPIs and virtual devices.
     */
    object_property_set_int(OBJECT(xsrc), xive->nr_irqs, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Initialize the END ESB source
     */
    object_property_set_int(OBJECT(end_xsrc), xive->nr_irqs, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Set the mapping address of the END ESB pages after the source ESBs */
    xive->end_base = xive->vc_base + (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    /*
     * Allocate the routing tables
     */
    xive->eat = g_new0(XiveEAS, xive->nr_irqs);
    xive->endt = g_new0(XiveEND, xive->nr_ends);

    xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
                           xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));

    qemu_register_reset(spapr_xive_reset, dev);
}

void spapr_xive_init(SpaprXive *xive, Error **errp)
{
    XiveSource *xsrc = &xive->source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* TIMA initialization */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
                          "xive.tima", 4ull << TM_SHIFT);

    /* Define all XIVE MMIO regions on SysBus */
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
    sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);

    /* Map all regions */
    spapr_xive_map_mmio(xive);
}
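
/*
 * A sketch of the resulting guest physical address layout, assuming
 * the default "vc-base" and "tm-base" property values defined above
 * (illustration only):
 *
 *   vc_base   0x0006010000000000   source ESB pages, one set per IRQ
 *   end_base  vc_base + nr_irqs * (1ull << esb_shift)   END ESB pages
 *   tm_base   0x0006030203180000   4 TIMA pages, one per privilege ring
 */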

static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
                              uint32_t eas_idx, XiveEAS *eas)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (eas_idx >= xive->nr_irqs) {
        return -1;
    }

    *eas = xive->eat[eas_idx];
    return 0;
}

static int spapr_xive_get_end(XiveRouter *xrtr,
                              uint8_t end_blk, uint32_t end_idx, XiveEND *end)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
    return 0;
}

static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
                                uint32_t end_idx, XiveEND *end,
                                uint8_t word_number)
{
    SpaprXive *xive = SPAPR_XIVE(xrtr);

    if (end_idx >= xive->nr_ends) {
        return -1;
    }

    memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
    return 0;
}

static int spapr_xive_get_nvt(XiveRouter *xrtr,
                              uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
{
    uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    if (!cpu) {
        /* TODO: should we assert() if we can't find an NVT ? */
        return -1;
    }

    /*
     * sPAPR does not maintain a NVT table. Return that the NVT is
     * valid if we have found a matching CPU
     */
    nvt->w0 = cpu_to_be32(NVT_W0_VALID);
    return 0;
}

static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
                                uint32_t nvt_idx, XiveNVT *nvt,
                                uint8_t word_number)
{
    /*
     * We don't need to write back to the NVTs because the sPAPR
     * machine should never hit a non-scheduled NVT, so this method
     * should never be called.
     */
    g_assert_not_reached();
}

static XiveTCTX *spapr_xive_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);

    return spapr_cpu_state(cpu)->tctx;
}

static const VMStateDescription vmstate_spapr_xive_end = {
    .name = TYPE_SPAPR_XIVE "/end",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(w0, XiveEND),
        VMSTATE_UINT32(w1, XiveEND),
        VMSTATE_UINT32(w2, XiveEND),
        VMSTATE_UINT32(w3, XiveEND),
        VMSTATE_UINT32(w4, XiveEND),
        VMSTATE_UINT32(w5, XiveEND),
        VMSTATE_UINT32(w6, XiveEND),
        VMSTATE_UINT32(w7, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_xive_eas = {
    .name = TYPE_SPAPR_XIVE "/eas",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(w, XiveEAS),
        VMSTATE_END_OF_LIST()
    },
};

static int vmstate_spapr_xive_pre_save(void *opaque)
{
    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_pre_save(SPAPR_XIVE(opaque));
    }

    return 0;
}

/*
 * Called by the sPAPR IRQ backend 'post_load' method at the machine
 * level.
 */
int spapr_xive_post_load(SpaprXive *xive, int version_id)
{
    if (kvm_irqchip_in_kernel()) {
        return kvmppc_xive_post_load(xive, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_xive = {
    .name = TYPE_SPAPR_XIVE,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_spapr_xive_pre_save,
    .post_load = NULL, /* handled at the machine level */
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
                                             vmstate_spapr_xive_eas, XiveEAS),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
                                             vmstate_spapr_xive_end, XiveEND),
        VMSTATE_END_OF_LIST()
    },
};

static Property spapr_xive_properties[] = {
    DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
    DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
    DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
    DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
    DEFINE_PROP_END_OF_LIST(),
};

static void spapr_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);

    dc->desc    = "sPAPR XIVE Interrupt Controller";
    dc->props   = spapr_xive_properties;
    dc->realize = spapr_xive_realize;
    dc->vmsd    = &vmstate_spapr_xive;

    xrc->get_eas = spapr_xive_get_eas;
    xrc->get_end = spapr_xive_get_end;
    xrc->write_end = spapr_xive_write_end;
    xrc->get_nvt = spapr_xive_get_nvt;
    xrc->write_nvt = spapr_xive_write_nvt;
    xrc->get_tctx = spapr_xive_get_tctx;
}

static const TypeInfo spapr_xive_info = {
    .name = TYPE_SPAPR_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = spapr_xive_instance_init,
    .instance_size = sizeof(SpaprXive),
    .class_init = spapr_xive_class_init,
};

static void spapr_xive_register_types(void)
{
    type_register_static(&spapr_xive_info);
}

type_init(spapr_xive_register_types)

bool spapr_xive_irq_claim(SpaprXive *xive, uint32_t lisn, bool lsi)
{
    XiveSource *xsrc = &xive->source;

    if (lisn >= xive->nr_irqs) {
        return false;
    }

    xive->eat[lisn].w |= cpu_to_be64(EAS_VALID);
    if (lsi) {
        xive_source_irq_set_lsi(xsrc, lisn);
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_source_reset_one(xsrc, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return false;
        }
    }

    return true;
}

bool spapr_xive_irq_free(SpaprXive *xive, uint32_t lisn)
{
    if (lisn >= xive->nr_irqs) {
        return false;
    }

    xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
    return true;
}

/*
 * XIVE hcalls
 *
 * The terminology used by the XIVE hcalls is the following:
 *
 *   TARGET vCPU number
 *   EQ     Event Queue assigned by OS to receive event data
 *   ESB    page for source interrupt management
 *   LISN   Logical Interrupt Source Number identifying a source in
 *          the machine
 *   EISN   Effective Interrupt Source Number used by guest OS to
 *          identify source in the guest
 *
 * The EAS, END, NVT structures are not exposed.
 */

/*
 * Linux hosts under OPAL reserve priority 7 for their own escalation
 * interrupts (DD2.X POWER9). So we only allow the guest to use
 * priorities [0..6].
 */
static bool spapr_xive_priority_is_reserved(uint8_t priority)
{
    switch (priority) {
    case 0 ... 6:
        return false;
    case 7: /* OPAL escalation queue */
    default:
        return true;
    }
}
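
/*
 * Consequently spapr_xive_priority_is_reserved(6) is false while
 * spapr_xive_priority_is_reserved(7) is true, which matches the
 * "ibm,plat-res-int-priorities" = { 7, 0xf8 } range advertised by
 * spapr_dt_xive() below.
 */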

/*
 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
 * real address of the MMIO page through which the Event State Buffer
 * entry associated with the value of the "lisn" parameter is managed.
 *
 * Parameters:
 * Input
 * - R4: "flags"
 *         Bits 0-63 reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned
 *       by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output
 * - R4: "flags"
 *         Bits 0-59: Reserved
 *         Bit 60: H_INT_ESB must be used for Event State Buffer
 *                 management
 *         Bit 61: 1 == LSI  0 == MSI
 *         Bit 62: the full function page supports trigger
 *         Bit 63: Store EOI Supported
 * - R5: Logical Real address of full function Event State Buffer
 *       management page, -1 if H_INT_ESB hcall flag is set to 1.
 * - R6: Logical Real Address of trigger only Event State Buffer
 *       management page or -1.
 * - R7: Power of 2 page size for the ESB management pages returned in
 *       R5 and R6.
 */

#define SPAPR_XIVE_SRC_H_INT_ESB  PPC_BIT(60) /* ESB manage with H_INT_ESB */
#define SPAPR_XIVE_SRC_LSI        PPC_BIT(61) /* Virtual LSI type */
#define SPAPR_XIVE_SRC_TRIGGER    PPC_BIT(62) /* Trigger and management
                                                 on same page */
#define SPAPR_XIVE_SRC_STORE_EOI  PPC_BIT(63) /* Store EOI support */

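/*
 * A hypothetical guest-side use of this hcall, assuming a Linux-like
 * plpar_hcall() interface (illustration only, not part of this model):
 *
 *   unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *   long rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, 0, lisn);
 *
 * On success, retbuf[0] holds the flags, retbuf[1] the ESB management
 * page address, retbuf[2] the trigger page address and retbuf[3] the
 * log2 of the ESB page size.
 */
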
static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
                                          SpaprMachineState *spapr,
                                          target_ulong opcode,
                                          target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveSource *xsrc = &xive->source;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (!xive_eas_is_valid(&xive->eat[lisn])) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * All sources are emulated under the main XIVE object and share
     * the same characteristics.
     */
    args[0] = 0;
    if (!xive_source_esb_has_2page(xsrc)) {
        args[0] |= SPAPR_XIVE_SRC_TRIGGER;
    }
    if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
        args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
    }

    /*
     * Force the use of the H_INT_ESB hcall in case of an LSI
     * interrupt. This is necessary under KVM to re-trigger the
     * interrupt if the level is still asserted
     */
    if (xive_source_irq_is_lsi(xsrc, lisn)) {
        args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
    }

    if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
    } else {
        args[1] = -1;
    }

    if (xive_source_esb_has_2page(xsrc) &&
        !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
        args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
    } else {
        args[2] = -1;
    }

    if (xive_source_esb_has_2page(xsrc)) {
        args[3] = xsrc->esb_shift - 1;
    } else {
        args[3] = xsrc->esb_shift;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
 * Interrupt Source to a target. The Logical Interrupt Source is
 * designated with the "lisn" parameter and the target is designated
 * with the "target" and "priority" parameters. Upon return from the
 * hcall(), no additional interrupts will be directed to the old EQ.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-61: Reserved
 *         Bit 62: set the "eisn" in the EAS
 *         Bit 63: masks the interrupt source in the hardware interrupt
 *                 control structure. An interrupt masked by this mechanism
 *                 will be dropped, but its source state bits will still
 *                 be set. There is no race-free way of unmasking and
 *                 restoring the source. Thus this should only be used
 *                 for interrupts that are also masked at the source, and
 *                 only in cases where the interrupt is not meant to be
 *                 used for a large amount of time, for example because
 *                 no valid target exists for it
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as returned by
 *       the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R7: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R8: "eisn" is the guest EISN associated with the "lisn"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
#define SPAPR_XIVE_SRC_MASK     PPC_BIT(63)

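/*
 * Hypothetical guest-side sketch (a Linux-like plpar_hcall_norets()
 * interface is assumed; the LISN, target and EISN values are made
 * up): route LISN 0x1000 to vCPU 0 at priority 6 with EISN 0x10:
 *
 *   long rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG,
 *                                SPAPR_XIVE_SRC_SET_EISN,
 *                                0x1000, 0, 6, 0x10);
 */
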
static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas, new_eas;
    target_ulong flags    = args[0];
    target_ulong lisn     = args[1];
    target_ulong target   = args[2];
    target_ulong priority = args[3];
    target_ulong eisn     = args[4];
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* priority 0xff is used to reset the EAS */
    if (priority == 0xff) {
        new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
        goto out;
    }

    if (flags & SPAPR_XIVE_SRC_MASK) {
        new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
    } else {
        new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
    }

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P4;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P3;
    }

    new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
    new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);

    if (flags & SPAPR_XIVE_SRC_SET_EISN) {
        new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

out:
    xive->eat[lisn] = new_eas;
    return H_SUCCESS;
}

/*
 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine which
 * target/priority pair is assigned to the specified Logical Interrupt
 * Source.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - R4: Target to which the specified Logical Interrupt Source is
 *       assigned
 * - R5: Priority to which the specified Logical Interrupt Source is
 *       assigned
 * - R6: EISN for the specified Logical Interrupt Source (this will be
 *       equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
 */
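
/*
 * Hypothetical guest-side sketch (a Linux-like plpar_hcall()
 * interface is assumed; illustration only):
 *
 *   unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *   long rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, 0, lisn);
 *
 * On success, retbuf[0] holds the target, retbuf[1] the priority and
 * retbuf[2] the EISN.
 */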
static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
                                            SpaprMachineState *spapr,
                                            target_ulong opcode,
                                            target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];
    XiveEAS eas;
    XiveEND *end;
    uint8_t nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /* EAS_END_BLOCK is unused on sPAPR */
    end_idx = xive_get_field64(EAS_END_INDEX, eas.w);

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);

    if (xive_eas_is_masked(&eas)) {
        args[1] = 0xff;
    } else {
        args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    }

    args[2] = xive_get_field64(EAS_END_DATA, eas.w);

    return H_SUCCESS;
}

/*
 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
 * address of the notification management page associated with the
 * specified target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63 Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: Logical real address of notification page
 * - R5: Power of 2 page size of the notification page
 */
static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong opcode,
                                         target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveENDSource *end_xsrc = &xive->end_source;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
    if (xive_end_is_enqueue(end)) {
        args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ
 * for a given "target" and "priority". It is also used to set the
 * notification config associated with the EQ. An EQ size of 0 is
 * used to reset the EQ config for a given target and priority. If
 * resetting the EQ config, the END associated with the given "target"
 * and "priority" will be changed to disable queueing.
 *
 * Upon return from the hcall(), no additional interrupts will be
 * directed to the old EQ (if one was set). The old EQ (if one was
 * set) should be investigated for interrupts that occurred prior to
 * or during the hcall().
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Unconditional Notify (n) per the XIVE spec
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 * - R7: "eventQueue": The logical real address of the start of the EQ
 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
 *
 * Output:
 * - None
 */

#define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)

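/*
 * Hypothetical guest-side sketch (Linux-like kernel helpers assumed;
 * the target/priority values are made up): register a zeroed,
 * naturally aligned 64K EQ for vCPU 0 at priority 6:
 *
 *   void *eq = alloc_pages_exact(1UL << 16, GFP_KERNEL | __GFP_ZERO);
 *   long rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG,
 *                                SPAPR_XIVE_END_ALWAYS_NOTIFY,
 *                                0, 6, __pa(eq), 16);
 */
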
static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    target_ulong qpage = args[3];
    target_ulong qsize = args[4];
    XiveEND end;
    uint8_t end_blk, nvt_blk;
    uint32_t end_idx, nvt_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */

    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));

    switch (qsize) {
    case 12:
    case 16:
    case 21:
    case 24:
        if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
                          " is not naturally aligned with %" HWADDR_PRIx "\n",
                          qpage, (hwaddr)1 << qsize);
            return H_P4;
        }
        end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
        end.w3 = cpu_to_be32(qpage & 0xffffffff);
        end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
        end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
        break;
    case 0:
        /* reset queue and disable queueing */
        spapr_xive_end_reset(&end);
        goto out;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
                      qsize);
        return H_P5;
    }

    if (qsize) {
        hwaddr plen = 1 << qsize;
        void *eq;

        /*
         * Validate the guest EQ. We should also check that the queue
         * has been zeroed by the OS.
         */
        eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
                               MEMTXATTRS_UNSPECIFIED);
        if (plen != 1 << qsize) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
                          HWADDR_PRIx "\n", qpage);
            return H_P4;
        }
        address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
    }

    /* "target" should have been validated above */
    if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
        g_assert_not_reached();
    }

    /*
     * Ensure the priority and target are correctly set (they will not
     * be right after allocation)
     */
    end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
             xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
    end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);

    if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
        end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
    } else {
        end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
    }

    /*
     * The generation bit for the END starts at 1 and the END page
     * offset counter starts at 0.
     */
    end.w1 = cpu_to_be32(END_W1_GENERATION) |
             xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
    end.w0 |= cpu_to_be32(END_W0_VALID);

    /*
     * TODO: issue syncs required to ensure all in-flight interrupts
     * are complete on the old END
     */

out:
    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* Update END */
    memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
    return H_SUCCESS;
}

/*
 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
 * target and priority.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Debug: Return debug data
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "priority" is a valid priority not in
 *       "ibm,plat-res-int-priorities"
 *
 * Output:
 * - R4: "flags":
 *         Bits 0-61: Reserved
 *         Bit 62: The value of Event Queue Generation Number (g) per
 *                 the XIVE spec if "Debug" = 1
 *         Bit 63: The value of Unconditional Notify (n) per the XIVE spec
 * - R5: The logical real address of the start of the EQ
 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
 * - R7: The value of Event Queue Offset Counter per XIVE spec
 *       if "Debug" = 1, else 0
 */

#define SPAPR_XIVE_END_DEBUG PPC_BIT(63)

static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];
    target_ulong target = args[1];
    target_ulong priority = args[2];
    XiveEND *end;
    uint8_t end_blk;
    uint32_t end_idx;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_END_DEBUG) {
        return H_PARAMETER;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    if (spapr_xive_priority_is_reserved(priority)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
                      " is reserved\n", priority);
        return H_P3;
    }

    /*
     * Validate that "target" is part of the list of threads allocated
     * to the partition. For that, find the END corresponding to the
     * target.
     */
    if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
        return H_P2;
    }

    assert(end_idx < xive->nr_ends);
    end = &xive->endt[end_idx];

    args[0] = 0;
    if (xive_end_is_notify(end)) {
        args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
    }

    if (xive_end_is_enqueue(end)) {
        args[1] = xive_end_qaddr(end);
        args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
    } else {
        args[1] = 0;
        args[2] = 0;
    }

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }

    /* TODO: do we need any locking on the END ? */
    if (flags & SPAPR_XIVE_END_DEBUG) {
        /* Load the event queue generation number into the return flags */
        args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;

        /* Load R7 with the event queue offset counter */
        args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        args[3] = 0;
    }

    return H_SUCCESS;
}

/*
 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
 * reporting cache line pair for the calling thread. The reporting
 * cache lines will contain the OS interrupt context when the OS
 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
 * interrupt. The reporting cache lines can be reset by inputting -1
 * in "reportingLine". Issuing the CI store byte without reporting
 * cache lines registered will result in the data not being accessible
 * to the OS.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "reportingLine": The logical real address of the reporting cache
 *       line pair
 *
 * Output:
 * - None
 */
static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_SET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
 * real address of the reporting cache line pair set for the input
 * "target". If no reporting cache line pair has been set, -1 is
 * returned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "reportingLine": The logical real address of the reporting
 *       cache line pair
 *
 * Output:
 * - R4: The logical real address of the reporting line if set, else -1
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_GET_OS_REPORTING_LINE */
    return H_FUNCTION;
}

/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn". This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: Store: Store=1, store operation, else load operation
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */

#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

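/*
 * Hypothetical guest-side sketch (a Linux-like plpar_hcall()
 * interface is assumed, and 0x800 is the conventional "ESB get"
 * management offset for reading the PQ bits): read the PQ state of a
 * LISN whose source info mandated H_INT_ESB:
 *
 *   unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *   long rc = plpar_hcall(H_INT_ESB, retbuf, 0, lisn, 0x800, 0);
 *
 * On success, retbuf[0] holds the ESB load value, i.e. the PQ bits.
 */
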
static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];
    target_ulong offset = args[2];
    target_ulong data   = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (kvm_irqchip_in_kernel()) {
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}

/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input lisn are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /*
     * This is not real hardware. Nothing to be done unless running
     * under KVM
     */

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state. This
 * means losing all previously set interrupt state set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_reset(DEVICE(xive));

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}

void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}

void spapr_dt_xive(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle)
{
    SpaprXive *xive = spapr->xive;
    int node;
    uint64_t timas[2 * 2];
    /* Interrupt number ranges for the IPIs */
    uint32_t lisn_ranges[] = {
        cpu_to_be32(0),
        cpu_to_be32(nr_servers),
    };
    /*
     * EQ size - the sizes of pages supported by the system: 4K, 64K,
     * 2M, 16M. We only advertise 64K for the moment.
     */
    uint32_t eq_sizes[] = {
        cpu_to_be32(16), /* 64K */
    };
    /*
     * The following array is in sync with the reserved priorities
     * defined by the 'spapr_xive_priority_is_reserved' routine.
     */
    uint32_t plat_res_int_priorities[] = {
        cpu_to_be32(7),    /* start */
        cpu_to_be32(0xf8), /* count */
    };

    /* Thread Interrupt Management Area: User (ring 3) and OS (ring 2) */
    timas[0] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
    timas[1] = cpu_to_be64(1ull << TM_SHIFT);
    timas[2] = cpu_to_be64(xive->tm_base +
                           XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
    timas[3] = cpu_to_be64(1ull << TM_SHIFT);

    _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));

    _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));

    _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
                     sizeof(eq_sizes)));
    _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
                     sizeof(lisn_ranges)));

    /* For Linux to link the LSIs to the interrupt controller. */
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));

    /* For SLOF */
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));

    /*
     * The "ibm,plat-res-int-priorities" property defines the priority
     * ranges reserved by the hypervisor
     */
    _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
                     plat_res_int_priorities, sizeof(plat_res_int_priorities)));
}
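
/*
 * For illustration, the device tree fragment produced above would
 * render roughly as follows (node name and values depend on the
 * machine configuration; the cell contents here are placeholders):
 *
 *   interrupt-controller@... {
 *           device_type = "power-ivpe";
 *           compatible = "ibm,power-ivpe";
 *           reg = <TIMA user page address/size, TIMA OS page address/size>;
 *           ibm,xive-eq-sizes = <0x10>;
 *           ibm,xive-lisn-ranges = <0x0 nr_servers>;
 *           interrupt-controller;
 *           #interrupt-cells = <0x2>;
 *           linux,phandle = <phandle>;
 *           phandle = <phandle>;
 *   };
 */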