1 /*
2 * QEMU PowerPC sPAPR XIVE interrupt controller model
3 *
4 * Copyright (c) 2017-2018, IBM Corporation.
5 *
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
8 */
9
10 #include "qemu/osdep.h"
11 #include "qemu/log.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "qemu/error-report.h"
15 #include "target/ppc/cpu.h"
16 #include "sysemu/cpus.h"
17 #include "sysemu/reset.h"
18 #include "migration/vmstate.h"
19 #include "monitor/monitor.h"
20 #include "hw/ppc/fdt.h"
21 #include "hw/ppc/spapr.h"
22 #include "hw/ppc/spapr_cpu_core.h"
23 #include "hw/ppc/spapr_xive.h"
24 #include "hw/ppc/xive.h"
25 #include "hw/ppc/xive_regs.h"
26 #include "hw/qdev-properties.h"
27 #include "trace.h"
28
29 /*
30 * XIVE Virtualization Controller BAR and Thread Management BAR that we
31 * use for the ESB pages and the TIMA pages
32 */
33 #define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
34 #define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
35
36 /*
37 * The allocation of VP blocks is a complex operation in OPAL and the
38 * VP identifiers are related to the number of HW chips, the
39 * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
40 * controller model does not have the same constraints and can use a
41 * simple mapping scheme of the CPU vcpu_id.
42 *
43 * These identifiers are never returned to the OS.
44 */
45
46 #define SPAPR_XIVE_NVT_BASE 0x400
47
48 /*
49 * sPAPR NVT and END indexing helpers
50 */
51 static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
52 {
53 return nvt_idx - SPAPR_XIVE_NVT_BASE;
54 }
55
56 static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
57 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
58 {
59 assert(cpu);
60
61 if (out_nvt_blk) {
62 *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
63 }
64
65 if (out_nvt_idx) {
66 *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
67 }
68 }
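/*
 * Worked example of the mapping above (illustration only, assuming
 * SPAPR_XIVE_BLOCK_ID == 0 as defined in spapr_xive.h): vCPU 3 is
 * assigned NVT 0/0x403, and spapr_xive_nvt_to_target(0, 0x403)
 * returns 3 again.
 */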
69
70 static int spapr_xive_target_to_nvt(uint32_t target,
71 uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
72 {
73 PowerPCCPU *cpu = spapr_find_cpu(target);
74
75 if (!cpu) {
76 return -1;
77 }
78
79 spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
80 return 0;
81 }
82
83 /*
84 * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, with
85 * 8 priorities per CPU
86 */
87 int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
88 uint32_t *out_server, uint8_t *out_prio)
89 {
90
91 assert(end_blk == SPAPR_XIVE_BLOCK_ID);
92
93 if (out_server) {
94 *out_server = end_idx >> 3;
95 }
96
97 if (out_prio) {
98 *out_prio = end_idx & 0x7;
99 }
100 return 0;
101 }
102
103 static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
104 uint8_t *out_end_blk, uint32_t *out_end_idx)
105 {
106 assert(cpu);
107
108 if (out_end_blk) {
109 *out_end_blk = SPAPR_XIVE_BLOCK_ID;
110 }
111
112 if (out_end_idx) {
113 *out_end_idx = (cpu->vcpu_id << 3) + prio;
114 }
115 }
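/*
 * Worked example of the END indexing arithmetic (illustration only):
 * vCPU 3 at priority 5 uses end_idx = (3 << 3) + 5 = 29, and
 * spapr_xive_end_to_target() recovers server 29 >> 3 = 3 and
 * priority 29 & 0x7 = 5.
 */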
116
117 static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
118 uint8_t *out_end_blk, uint32_t *out_end_idx)
119 {
120 PowerPCCPU *cpu = spapr_find_cpu(target);
121
122 if (!cpu) {
123 return -1;
124 }
125
126 spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
127 return 0;
128 }
129
130 /*
131 * On sPAPR machines, use a simplified output for the XIVE END
132 * structure, dumping only the information related to the OS EQ.
133 */
134 static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
135 Monitor *mon)
136 {
137 uint64_t qaddr_base = xive_end_qaddr(end);
138 uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
139 uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
140 uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
141 uint32_t qentries = 1 << (qsize + 10);
142 uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
143 uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
144
145 monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
146 spapr_xive_nvt_to_target(0, nvt),
147 priority, qindex, qentries, qaddr_base, qgen);
148
149 xive_end_queue_pic_print_info(end, 6, mon);
150 }
151
152 /*
153 * kvm_irqchip_in_kernel() will cause the compiler to turn this
154 * into a nop if CONFIG_KVM isn't defined.
155 */
156 #define spapr_xive_in_kernel(xive) \
157 (kvm_irqchip_in_kernel() && (xive)->fd != -1)
158
159 void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon)
160 {
161 XiveSource *xsrc = &xive->source;
162 int i;
163
164 if (spapr_xive_in_kernel(xive)) {
165 Error *local_err = NULL;
166
167 kvmppc_xive_synchronize_state(xive, &local_err);
168 if (local_err) {
169 error_report_err(local_err);
170 return;
171 }
172 }
173
174 monitor_printf(mon, " LISN PQ EISN CPU/PRIO EQ\n");
175
176 for (i = 0; i < xive->nr_irqs; i++) {
177 uint8_t pq = xive_source_esb_get(xsrc, i);
178 XiveEAS *eas = &xive->eat[i];
179
180 if (!xive_eas_is_valid(eas)) {
181 continue;
182 }
183
184 monitor_printf(mon, " %08x %s %c%c%c %s %08x ", i,
185 xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
186 pq & XIVE_ESB_VAL_P ? 'P' : '-',
187 pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
188 xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ',
189 xive_eas_is_masked(eas) ? "M" : " ",
190 (int) xive_get_field64(EAS_END_DATA, eas->w));
191
192 if (!xive_eas_is_masked(eas)) {
193 uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
194 XiveEND *end;
195
196 assert(end_idx < xive->nr_ends);
197 end = &xive->endt[end_idx];
198
199 if (xive_end_is_valid(end)) {
200 spapr_xive_end_pic_print_info(xive, end, mon);
201 }
202 }
203 monitor_printf(mon, "\n");
204 }
205 }
206
207 void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
208 {
209 memory_region_set_enabled(&xive->source.esb_mmio, enable);
210 memory_region_set_enabled(&xive->tm_mmio, enable);
211
212 /* Disable the END ESBs until a guest OS makes use of them */
213 memory_region_set_enabled(&xive->end_source.esb_mmio, false);
214 }
215
216 static void spapr_xive_tm_write(void *opaque, hwaddr offset,
217 uint64_t value, unsigned size)
218 {
219 XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
220
221 xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
222 }
223
224 static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
225 {
226 XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
227
228 return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
229 }
230
231 const MemoryRegionOps spapr_xive_tm_ops = {
232 .read = spapr_xive_tm_read,
233 .write = spapr_xive_tm_write,
234 .endianness = DEVICE_BIG_ENDIAN,
235 .valid = {
236 .min_access_size = 1,
237 .max_access_size = 8,
238 },
239 .impl = {
240 .min_access_size = 1,
241 .max_access_size = 8,
242 },
243 };
244
245 static void spapr_xive_end_reset(XiveEND *end)
246 {
247 memset(end, 0, sizeof(*end));
248
249 /* switch off the escalation and notification ESBs */
250 end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
251 }
252
253 static void spapr_xive_reset(void *dev)
254 {
255 SpaprXive *xive = SPAPR_XIVE(dev);
256 int i;
257
258 /*
259 * The XiveSource has its own reset handler, which masks off all
260 * IRQs (!P|Q)
261 */
262
263 /* Mask all valid EASs in the IRQ number space. */
264 for (i = 0; i < xive->nr_irqs; i++) {
265 XiveEAS *eas = &xive->eat[i];
266 if (xive_eas_is_valid(eas)) {
267 eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
268 } else {
269 eas->w = 0;
270 }
271 }
272
273 /* Clear all ENDs */
274 for (i = 0; i < xive->nr_ends; i++) {
275 spapr_xive_end_reset(&xive->endt[i]);
276 }
277 }
278
279 static void spapr_xive_instance_init(Object *obj)
280 {
281 SpaprXive *xive = SPAPR_XIVE(obj);
282
283 object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);
284
285 object_initialize_child(obj, "end_source", &xive->end_source,
286 TYPE_XIVE_END_SOURCE);
287
288 /* Not connected to the KVM XIVE device */
289 xive->fd = -1;
290 }
291
292 static void spapr_xive_realize(DeviceState *dev, Error **errp)
293 {
294 SpaprXive *xive = SPAPR_XIVE(dev);
295 SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
296 XiveSource *xsrc = &xive->source;
297 XiveENDSource *end_xsrc = &xive->end_source;
298 Error *local_err = NULL;
299
300 /* Set by spapr_irq_init() */
301 g_assert(xive->nr_irqs);
302 g_assert(xive->nr_ends);
303
304 sxc->parent_realize(dev, &local_err);
305 if (local_err) {
306 error_propagate(errp, local_err);
307 return;
308 }
309
310 /*
311 * Initialize the internal sources, for IPIs and virtual devices.
312 */
313 object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
314 &error_fatal);
315 object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
316 if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
317 return;
318 }
319 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio);
320
321 /*
322 * Initialize the END ESB source
323 */
324 object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
325 &error_fatal);
326 object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
327 &error_abort);
328 if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
329 return;
330 }
331 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio);
332
333 /* Set the mapping address of the END ESB pages after the source ESBs */
334 xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);
335
336 /*
337 * Allocate the routing tables
338 */
339 xive->eat = g_new0(XiveEAS, xive->nr_irqs);
340 xive->endt = g_new0(XiveEND, xive->nr_ends);
341
342 xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
343 xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
344
345 qemu_register_reset(spapr_xive_reset, dev);
346
347 /* TIMA initialization */
348 memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
349 xive, "xive.tima", 4ull << TM_SHIFT);
350 sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio);
351
352 /*
353 * Map all regions. These will be enabled or disabled at reset and
354 * can also be overridden by KVM memory regions if active
355 */
356 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base);
357 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base);
358 sysbus_mmio_map(SYS_BUS_DEVICE(xive), 2, xive->tm_base);
359 }
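/*
 * A sketch of the resulting MMIO layout (sizes depend on nr_irqs,
 * nr_ends and the ESB page shift, so this is illustrative only):
 *
 *   vc_base                        source ESB pages (IPIs and devices)
 *   end_base = vc_base + esb_len   END ESB pages
 *   tm_base                        4 TIMA pages, indexed by the
 *                                  XIVE_TM_*_PAGE constants
 */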
360
361 static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
362 uint32_t eas_idx, XiveEAS *eas)
363 {
364 SpaprXive *xive = SPAPR_XIVE(xrtr);
365
366 if (eas_idx >= xive->nr_irqs) {
367 return -1;
368 }
369
370 *eas = xive->eat[eas_idx];
371 return 0;
372 }
373
374 static int spapr_xive_get_end(XiveRouter *xrtr,
375 uint8_t end_blk, uint32_t end_idx, XiveEND *end)
376 {
377 SpaprXive *xive = SPAPR_XIVE(xrtr);
378
379 if (end_idx >= xive->nr_ends) {
380 return -1;
381 }
382
383 memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
384 return 0;
385 }
386
387 static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
388 uint32_t end_idx, XiveEND *end,
389 uint8_t word_number)
390 {
391 SpaprXive *xive = SPAPR_XIVE(xrtr);
392
393 if (end_idx >= xive->nr_ends) {
394 return -1;
395 }
396
397 memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
398 return 0;
399 }
400
401 static int spapr_xive_get_nvt(XiveRouter *xrtr,
402 uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
403 {
404 uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
405 PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
406
407 if (!cpu) {
408 /* TODO: should we assert() if we can't find an NVT ? */
409 return -1;
410 }
411
412 /*
413 * sPAPR does not maintain a NVT table. Return that the NVT is
414 * valid if we have found a matching CPU
415 */
416 nvt->w0 = cpu_to_be32(NVT_W0_VALID);
417 return 0;
418 }
419
420 static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
421 uint32_t nvt_idx, XiveNVT *nvt,
422 uint8_t word_number)
423 {
424 /*
425 * We don't need to write back to the NVTs because the sPAPR
426 * machine should never hit a non-scheduled NVT, so this callback
427 * should never get called.
428 */
429 g_assert_not_reached();
430 }
431
432 static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
433 uint8_t nvt_blk, uint32_t nvt_idx,
434 bool cam_ignore, uint8_t priority,
435 uint32_t logic_serv, XiveTCTXMatch *match)
436 {
437 CPUState *cs;
438 int count = 0;
439
440 CPU_FOREACH(cs) {
441 PowerPCCPU *cpu = POWERPC_CPU(cs);
442 XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
443 int ring;
444
445 /*
446 * Skip partially initialized vCPUs. This can happen when
447 * vCPUs are hotplugged.
448 */
449 if (!tctx) {
450 continue;
451 }
452
453 /*
454 * Check the thread context CAM lines and record matches.
455 */
456 ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
457 cam_ignore, logic_serv);
458 /*
459 * Save the matching thread interrupt context and continue the
460 * scan to check for duplicates, which are invalid.
461 */
462 if (ring != -1) {
463 if (match->tctx) {
464 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
465 "context NVT %x/%x\n", nvt_blk, nvt_idx);
466 return -1;
467 }
468
469 match->ring = ring;
470 match->tctx = tctx;
471 count++;
472 }
473 }
474
475 return count;
476 }
477
478 static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
479 {
480 return SPAPR_XIVE_BLOCK_ID;
481 }
482
483 static const VMStateDescription vmstate_spapr_xive_end = {
484 .name = TYPE_SPAPR_XIVE "/end",
485 .version_id = 1,
486 .minimum_version_id = 1,
487 .fields = (VMStateField []) {
488 VMSTATE_UINT32(w0, XiveEND),
489 VMSTATE_UINT32(w1, XiveEND),
490 VMSTATE_UINT32(w2, XiveEND),
491 VMSTATE_UINT32(w3, XiveEND),
492 VMSTATE_UINT32(w4, XiveEND),
493 VMSTATE_UINT32(w5, XiveEND),
494 VMSTATE_UINT32(w6, XiveEND),
495 VMSTATE_UINT32(w7, XiveEND),
496 VMSTATE_END_OF_LIST()
497 },
498 };
499
500 static const VMStateDescription vmstate_spapr_xive_eas = {
501 .name = TYPE_SPAPR_XIVE "/eas",
502 .version_id = 1,
503 .minimum_version_id = 1,
504 .fields = (VMStateField []) {
505 VMSTATE_UINT64(w, XiveEAS),
506 VMSTATE_END_OF_LIST()
507 },
508 };
509
510 static int vmstate_spapr_xive_pre_save(void *opaque)
511 {
512 SpaprXive *xive = SPAPR_XIVE(opaque);
513
514 if (spapr_xive_in_kernel(xive)) {
515 return kvmppc_xive_pre_save(xive);
516 }
517
518 return 0;
519 }
520
521 /*
522 * Called by the sPAPR IRQ backend 'post_load' method at the machine
523 * level.
524 */
525 static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
526 {
527 SpaprXive *xive = SPAPR_XIVE(intc);
528
529 if (spapr_xive_in_kernel(xive)) {
530 return kvmppc_xive_post_load(xive, version_id);
531 }
532
533 return 0;
534 }
535
536 static const VMStateDescription vmstate_spapr_xive = {
537 .name = TYPE_SPAPR_XIVE,
538 .version_id = 1,
539 .minimum_version_id = 1,
540 .pre_save = vmstate_spapr_xive_pre_save,
541 .post_load = NULL, /* handled at the machine level */
542 .fields = (VMStateField[]) {
543 VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
544 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
545 vmstate_spapr_xive_eas, XiveEAS),
546 VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
547 vmstate_spapr_xive_end, XiveEND),
548 VMSTATE_END_OF_LIST()
549 },
550 };
551
552 static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
553 bool lsi, Error **errp)
554 {
555 SpaprXive *xive = SPAPR_XIVE(intc);
556 XiveSource *xsrc = &xive->source;
557
558 assert(lisn < xive->nr_irqs);
559
560 trace_spapr_xive_claim_irq(lisn, lsi);
561
562 if (xive_eas_is_valid(&xive->eat[lisn])) {
563 error_setg(errp, "IRQ %d is not free", lisn);
564 return -EBUSY;
565 }
566
567 /*
568 * Set default values when allocating an IRQ number
569 */
570 xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
571 if (lsi) {
572 xive_source_irq_set_lsi(xsrc, lisn);
573 }
574
575 if (spapr_xive_in_kernel(xive)) {
576 return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
577 }
578
579 return 0;
580 }
581
582 static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
583 {
584 SpaprXive *xive = SPAPR_XIVE(intc);
585 assert(lisn < xive->nr_irqs);
586
587 trace_spapr_xive_free_irq(lisn);
588
589 xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
590 }
591
592 static Property spapr_xive_properties[] = {
593 DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
594 DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
595 DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
596 DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
597 DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
598 DEFINE_PROP_END_OF_LIST(),
599 };
600
601 static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
602 PowerPCCPU *cpu, Error **errp)
603 {
604 SpaprXive *xive = SPAPR_XIVE(intc);
605 Object *obj;
606 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
607
608 obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
609 if (!obj) {
610 return -1;
611 }
612
613 spapr_cpu->tctx = XIVE_TCTX(obj);
614 return 0;
615 }
616
617 static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
618 {
619 uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
620 memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
621 }
622
623 static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
624 PowerPCCPU *cpu)
625 {
626 XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
627 uint8_t nvt_blk;
628 uint32_t nvt_idx;
629
630 xive_tctx_reset(tctx);
631
632 /*
633 * When a Virtual Processor is scheduled to run on a HW thread,
634 * the hypervisor pushes its identifier in the OS CAM line.
635 * Emulate the same behavior under QEMU.
636 */
637 spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);
638
639 xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
640 }
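/*
 * For illustration: since SPAPR_XIVE_BLOCK_ID is 0 and block 0
 * contributes no bits to the CAM value, vCPU 0 ends up with
 * TM_QW1W2_VO | 0x400 in its OS CAM line after reset.
 */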
641
642 static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
643 PowerPCCPU *cpu)
644 {
645 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
646
647 xive_tctx_destroy(spapr_cpu->tctx);
648 spapr_cpu->tctx = NULL;
649 }
650
651 static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
652 {
653 SpaprXive *xive = SPAPR_XIVE(intc);
654
655 trace_spapr_xive_set_irq(irq, val);
656
657 if (spapr_xive_in_kernel(xive)) {
658 kvmppc_xive_source_set_irq(&xive->source, irq, val);
659 } else {
660 xive_source_set_irq(&xive->source, irq, val);
661 }
662 }
663
664 static void spapr_xive_print_info(SpaprInterruptController *intc, Monitor *mon)
665 {
666 SpaprXive *xive = SPAPR_XIVE(intc);
667 CPUState *cs;
668
669 CPU_FOREACH(cs) {
670 PowerPCCPU *cpu = POWERPC_CPU(cs);
671
672 xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
673 }
674
675 spapr_xive_pic_print_info(xive, mon);
676 }
677
678 static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
679 void *fdt, uint32_t phandle)
680 {
681 SpaprXive *xive = SPAPR_XIVE(intc);
682 int node;
683 uint64_t timas[2 * 2];
684 /* Interrupt number ranges for the IPIs */
685 uint32_t lisn_ranges[] = {
686 cpu_to_be32(SPAPR_IRQ_IPI),
687 cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
688 };
689 /*
690 * EQ size - the sizes of pages supported by the system: 4K, 64K,
691 * 2M, 16M. We only advertise 64K for the moment.
692 */
693 uint32_t eq_sizes[] = {
694 cpu_to_be32(16), /* 64K */
695 };
696 /*
697 * QEMU/KVM only needs to define a single range to reserve the
698 * escalation priority. A priority bitmask would have been more
699 * appropriate.
700 */
701 uint32_t plat_res_int_priorities[] = {
702 cpu_to_be32(xive->hv_prio), /* start */
703 cpu_to_be32(0xff - xive->hv_prio), /* count */
704 };
705
706 /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
707 timas[0] = cpu_to_be64(xive->tm_base +
708 XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
709 timas[1] = cpu_to_be64(1ull << TM_SHIFT);
710 timas[2] = cpu_to_be64(xive->tm_base +
711 XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
712 timas[3] = cpu_to_be64(1ull << TM_SHIFT);
713
714 _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));
715
716 _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
717 _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));
718
719 _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
720 _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
721 sizeof(eq_sizes)));
722 _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
723 sizeof(lisn_ranges)));
724
725 /* For Linux to link the LSIs to the interrupt controller. */
726 _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
727 _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
728
729 /* For SLOF */
730 _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
731 _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
732
733 /*
734 * The "ibm,plat-res-int-priorities" property defines the priority
735 * ranges reserved by the hypervisor
736 */
737 _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
738 plat_res_int_priorities, sizeof(plat_res_int_priorities)));
739 }
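/*
 * The generated node looks roughly as follows (a sketch; the unit
 * address and property values depend on tm_base, nr_servers and the
 * machine IRQ layout):
 *
 *   interrupt-controller@... {
 *       device_type = "power-ivpe";
 *       compatible = "ibm,power-ivpe";
 *       reg = < user TIMA page/size, OS TIMA page/size >;
 *       ibm,xive-eq-sizes = <16>;
 *       ibm,xive-lisn-ranges = < SPAPR_IRQ_IPI, SPAPR_IRQ_IPI + nr_servers >;
 *       interrupt-controller;
 *       #interrupt-cells = <2>;
 *   };
 */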
740
741 static int spapr_xive_activate(SpaprInterruptController *intc,
742 uint32_t nr_servers, Error **errp)
743 {
744 SpaprXive *xive = SPAPR_XIVE(intc);
745
746 if (kvm_enabled()) {
747 int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
748 errp);
749 if (rc < 0) {
750 return rc;
751 }
752 }
753
754 /* Activate the XIVE MMIOs */
755 spapr_xive_mmio_set_enabled(xive, true);
756
757 return 0;
758 }
759
760 static void spapr_xive_deactivate(SpaprInterruptController *intc)
761 {
762 SpaprXive *xive = SPAPR_XIVE(intc);
763
764 spapr_xive_mmio_set_enabled(xive, false);
765
766 if (spapr_xive_in_kernel(xive)) {
767 kvmppc_xive_disconnect(intc);
768 }
769 }
770
771 static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
772 {
773 return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
774 }
775
776 static void spapr_xive_class_init(ObjectClass *klass, void *data)
777 {
778 DeviceClass *dc = DEVICE_CLASS(klass);
779 XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
780 SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
781 XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
782 SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);
783
784 dc->desc = "sPAPR XIVE Interrupt Controller";
785 device_class_set_props(dc, spapr_xive_properties);
786 device_class_set_parent_realize(dc, spapr_xive_realize,
787 &sxc->parent_realize);
788 dc->vmsd = &vmstate_spapr_xive;
789
790 xrc->get_eas = spapr_xive_get_eas;
791 xrc->get_end = spapr_xive_get_end;
792 xrc->write_end = spapr_xive_write_end;
793 xrc->get_nvt = spapr_xive_get_nvt;
794 xrc->write_nvt = spapr_xive_write_nvt;
795 xrc->get_block_id = spapr_xive_get_block_id;
796
797 sicc->activate = spapr_xive_activate;
798 sicc->deactivate = spapr_xive_deactivate;
799 sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
800 sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
801 sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
802 sicc->claim_irq = spapr_xive_claim_irq;
803 sicc->free_irq = spapr_xive_free_irq;
804 sicc->set_irq = spapr_xive_set_irq;
805 sicc->print_info = spapr_xive_print_info;
806 sicc->dt = spapr_xive_dt;
807 sicc->post_load = spapr_xive_post_load;
808
809 xpc->match_nvt = spapr_xive_match_nvt;
810 xpc->in_kernel = spapr_xive_in_kernel_xptr;
811 }
812
813 static const TypeInfo spapr_xive_info = {
814 .name = TYPE_SPAPR_XIVE,
815 .parent = TYPE_XIVE_ROUTER,
816 .instance_init = spapr_xive_instance_init,
817 .instance_size = sizeof(SpaprXive),
818 .class_init = spapr_xive_class_init,
819 .class_size = sizeof(SpaprXiveClass),
820 .interfaces = (InterfaceInfo[]) {
821 { TYPE_SPAPR_INTC },
822 { }
823 },
824 };
825
826 static void spapr_xive_register_types(void)
827 {
828 type_register_static(&spapr_xive_info);
829 }
830
831 type_init(spapr_xive_register_types)
832
833 /*
834 * XIVE hcalls
835 *
836 * The terminology used by the XIVE hcalls is the following:
837 *
838 * TARGET vCPU number
839 * EQ Event Queue assigned by OS to receive event data
840 * ESB page for source interrupt management
841 * LISN Logical Interrupt Source Number identifying a source in the
842 * machine
843 * EISN Effective Interrupt Source Number used by guest OS to
844 * identify source in the guest
845 *
846 * The EAS, END, NVT structures are not exposed.
847 */
848
849 /*
850 * On POWER9, the KVM XIVE device uses priority 7 for the escalation
851 * interrupts. So we only allow the guest to use priorities [0..6].
852 */
853 static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
854 {
855 return priority >= xive->hv_prio;
856 }
857
858 /*
859 * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
860 * real address of the MMIO page through which the Event State Buffer
861 * entry associated with the value of the "lisn" parameter is managed.
862 *
863 * Parameters:
864 * Input
865 * - R4: "flags"
866 * Bits 0-63 reserved
867 * - R5: "lisn" is per "interrupts", "interrupt-map", or
868 * "ibm,xive-lisn-ranges" properties, or as returned by the
869 * ibm,query-interrupt-source-number RTAS call, or as returned
870 * by the H_ALLOCATE_VAS_WINDOW hcall
871 *
872 * Output
873 * - R4: "flags"
874 * Bits 0-59: Reserved
875 * Bit 60: H_INT_ESB must be used for Event State Buffer
876 * management
877 * Bit 61: 1 == LSI, 0 == MSI
878 * Bit 62: the full function page supports trigger
879 * Bit 63: Store EOI Supported
880 * - R5: Logical Real address of full function Event State Buffer
881 * management page, -1 if H_INT_ESB hcall flag is set to 1.
882 * - R6: Logical Real Address of trigger only Event State Buffer
883 * management page or -1.
884 * - R7: Power of 2 page size for the ESB management pages returned in
885 * R5 and R6.
886 */
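/*
 * A hypothetical guest-side use of this hcall, as a sketch only
 * (plpar_hcall() and retbuf follow the Linux pseries convention;
 * this is not a real Linux code path):
 *
 *   rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, 0, lisn);
 *   if (retbuf[0] & SPAPR_XIVE_SRC_H_INT_ESB) {
 *       ... drive the source with the H_INT_ESB hcall ...
 *   } else {
 *       ... map the ESB page at retbuf[1], of size 1 << retbuf[3] ...
 *   }
 */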
887
888 #define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60) /* ESB manage with H_INT_ESB */
889 #define SPAPR_XIVE_SRC_LSI PPC_BIT(61) /* Virtual LSI type */
890 #define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62) /* Trigger and management
891 on same page */
892 #define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63) /* Store EOI support */
893
894 static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
895 SpaprMachineState *spapr,
896 target_ulong opcode,
897 target_ulong *args)
898 {
899 SpaprXive *xive = spapr->xive;
900 XiveSource *xsrc = &xive->source;
901 target_ulong flags = args[0];
902 target_ulong lisn = args[1];
903
904 trace_spapr_xive_get_source_info(flags, lisn);
905
906 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
907 return H_FUNCTION;
908 }
909
910 if (flags) {
911 return H_PARAMETER;
912 }
913
914 if (lisn >= xive->nr_irqs) {
915 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
916 lisn);
917 return H_P2;
918 }
919
920 if (!xive_eas_is_valid(&xive->eat[lisn])) {
921 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
922 lisn);
923 return H_P2;
924 }
925
926 /*
927 * All sources are emulated under the main XIVE object and share
928 * the same characteristics.
929 */
930 args[0] = 0;
931 if (!xive_source_esb_has_2page(xsrc)) {
932 args[0] |= SPAPR_XIVE_SRC_TRIGGER;
933 }
934 if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
935 args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
936 }
937
938 /*
939 * Force the use of the H_INT_ESB hcall in case of an LSI
940 * interrupt. This is necessary under KVM to re-trigger the
941 * interrupt if the level is still asserted
942 */
943 if (xive_source_irq_is_lsi(xsrc, lisn)) {
944 args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
945 }
946
947 if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
948 args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
949 } else {
950 args[1] = -1;
951 }
952
953 if (xive_source_esb_has_2page(xsrc) &&
954 !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
955 args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
956 } else {
957 args[2] = -1;
958 }
959
960 if (xive_source_esb_has_2page(xsrc)) {
961 args[3] = xsrc->esb_shift - 1;
962 } else {
963 args[3] = xsrc->esb_shift;
964 }
965
966 return H_SUCCESS;
967 }
968
969 /*
970 * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
971 * Interrupt Source to a target. The Logical Interrupt Source is
972 * designated with the "lisn" parameter and the target is designated
973 * with the "target" and "priority" parameters. Upon return from the
974 * hcall(), no additional interrupts will be directed to the old EQ.
975 *
976 * Parameters:
977 * Input:
978 * - R4: "flags"
979 * Bits 0-61: Reserved
980 * Bit 62: set the "eisn" in the EAS
981 * Bit 63: masks the interrupt source in the hardware interrupt
982 * control structure. An interrupt masked by this mechanism will
983 * be dropped, but its source state bits will still be
984 * set. There is no race-free way of unmasking and restoring the
985 * source. Thus this should only be used for interrupts that are
986 * also masked at the source, and only in cases where the
987 * interrupt is not meant to be used for a long time, for
988 * example because no valid target exists for it
989 * - R5: "lisn" is per "interrupts", "interrupt-map", or
990 * "ibm,xive-lisn-ranges" properties, or as returned by the
991 * ibm,query-interrupt-source-number RTAS call, or as returned by
992 * the H_ALLOCATE_VAS_WINDOW hcall
993 * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
994 * "ibm,ppc-interrupt-gserver#s"
995 * - R7: "priority" is a valid priority not in
996 * "ibm,plat-res-int-priorities"
997 * - R8: "eisn" is the guest EISN associated with the "lisn"
998 *
999 * Output:
1000 * - None
1001 */
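/*
 * For illustration (hypothetical values, using this file's flag
 * names): a guest routing LISN 0x42 to vCPU 0 at priority 5, with
 * an EISN equal to the LISN, would issue something like:
 *
 *   plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG,
 *                      SPAPR_XIVE_SRC_SET_EISN, 0x42, 0, 5, 0x42);
 */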
1002
1003 #define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
1004 #define SPAPR_XIVE_SRC_MASK PPC_BIT(63)
1005
1006 static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
1007 SpaprMachineState *spapr,
1008 target_ulong opcode,
1009 target_ulong *args)
1010 {
1011 SpaprXive *xive = spapr->xive;
1012 XiveEAS eas, new_eas;
1013 target_ulong flags = args[0];
1014 target_ulong lisn = args[1];
1015 target_ulong target = args[2];
1016 target_ulong priority = args[3];
1017 target_ulong eisn = args[4];
1018 uint8_t end_blk;
1019 uint32_t end_idx;
1020
1021 trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);
1022
1023 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1024 return H_FUNCTION;
1025 }
1026
1027 if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
1028 return H_PARAMETER;
1029 }
1030
1031 if (lisn >= xive->nr_irqs) {
1032 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1033 lisn);
1034 return H_P2;
1035 }
1036
1037 eas = xive->eat[lisn];
1038 if (!xive_eas_is_valid(&eas)) {
1039 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1040 lisn);
1041 return H_P2;
1042 }
1043
1044 /* priority 0xff is used to reset the EAS */
1045 if (priority == 0xff) {
1046 new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
1047 goto out;
1048 }
1049
1050 if (flags & SPAPR_XIVE_SRC_MASK) {
1051 new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
1052 } else {
1053 new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
1054 }
1055
1056 if (spapr_xive_priority_is_reserved(xive, priority)) {
1057 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
1058 " is reserved\n", priority);
1059 return H_P4;
1060 }
1061
1062 /*
1063 * Validate that "target" is part of the list of threads allocated
1064 * to the partition. For that, find the END corresponding to the
1065 * target.
1066 */
1067 if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
1068 return H_P3;
1069 }
1070
1071 new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
1072 new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);
1073
1074 if (flags & SPAPR_XIVE_SRC_SET_EISN) {
1075 new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
1076 }
1077
1078 if (spapr_xive_in_kernel(xive)) {
1079 Error *local_err = NULL;
1080
1081 kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
1082 if (local_err) {
1083 error_report_err(local_err);
1084 return H_HARDWARE;
1085 }
1086 }
1087
1088 out:
1089 xive->eat[lisn] = new_eas;
1090 return H_SUCCESS;
1091 }
1092
1093 /*
1094 * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine which
1095 * target/priority pair is assigned to the specified Logical Interrupt
1096 * Source.
1097 *
1098 * Parameters:
1099 * Input:
1100 * - R4: "flags"
1101 * Bits 0-63 Reserved
1102 * - R5: "lisn" is per "interrupts", "interrupt-map", or
1103 * "ibm,xive-lisn-ranges" properties, or as returned by the
1104 * ibm,query-interrupt-source-number RTAS call, or as
1105 * returned by the H_ALLOCATE_VAS_WINDOW hcall
1106 *
1107 * Output:
1108 * - R4: Target to which the specified Logical Interrupt Source is
1109 * assigned
1110 * - R5: Priority to which the specified Logical Interrupt Source is
1111 * assigned
1112 * - R6: EISN for the specified Logical Interrupt Source (this will be
1113 * equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
1114 */
1115 static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
1116 SpaprMachineState *spapr,
1117 target_ulong opcode,
1118 target_ulong *args)
1119 {
1120 SpaprXive *xive = spapr->xive;
1121 target_ulong flags = args[0];
1122 target_ulong lisn = args[1];
1123 XiveEAS eas;
1124 XiveEND *end;
1125 uint8_t nvt_blk;
1126 uint32_t end_idx, nvt_idx;
1127
1128 trace_spapr_xive_get_source_config(flags, lisn);
1129
1130 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1131 return H_FUNCTION;
1132 }
1133
1134 if (flags) {
1135 return H_PARAMETER;
1136 }
1137
1138 if (lisn >= xive->nr_irqs) {
1139 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1140 lisn);
1141 return H_P2;
1142 }
1143
1144 eas = xive->eat[lisn];
1145 if (!xive_eas_is_valid(&eas)) {
1146 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1147 lisn);
1148 return H_P2;
1149 }
1150
1151 /* EAS_END_BLOCK is unused on sPAPR */
1152 end_idx = xive_get_field64(EAS_END_INDEX, eas.w);
1153
1154 assert(end_idx < xive->nr_ends);
1155 end = &xive->endt[end_idx];
1156
1157 nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
1158 nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
1159 args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
1160
1161 if (xive_eas_is_masked(&eas)) {
1162 args[1] = 0xff;
1163 } else {
1164 args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
1165 }
1166
1167 args[2] = xive_get_field64(EAS_END_DATA, eas.w);
1168
1169 return H_SUCCESS;
1170 }
1171
1172 /*
1173 * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
1174 * address of the notification management page associated with the
1175 * specified target and priority.
1176 *
1177 * Parameters:
1178 * Input:
1179 * - R4: "flags"
1180 * Bits 0-63 Reserved
1181 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
1182 * "ibm,ppc-interrupt-gserver#s"
1183 * - R6: "priority" is a valid priority not in
1184 * "ibm,plat-res-int-priorities"
1185 *
1186 * Output:
1187 * - R4: Logical real address of notification page
1188 * - R5: Power of 2 page size of the notification page
1189 */
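/*
 * Each END owns a pair of ESB pages in the END ESB space, which is
 * why the notification page address computed below is
 * end_base + end_idx * 2 * (1 << esb_shift) (the "esb_shift + 1"
 * in the args[0] assignment).
 */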
1190 static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
1191 SpaprMachineState *spapr,
1192 target_ulong opcode,
1193 target_ulong *args)
1194 {
1195 SpaprXive *xive = spapr->xive;
1196 XiveENDSource *end_xsrc = &xive->end_source;
1197 target_ulong flags = args[0];
1198 target_ulong target = args[1];
1199 target_ulong priority = args[2];
1200 XiveEND *end;
1201 uint8_t end_blk;
1202 uint32_t end_idx;
1203
1204 trace_spapr_xive_get_queue_info(flags, target, priority);
1205
1206 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1207 return H_FUNCTION;
1208 }
1209
1210 if (flags) {
1211 return H_PARAMETER;
1212 }
1213
1214 /*
1215 * H_STATE should be returned if an H_INT_RESET is in progress.
1216 * This is not needed when running the emulation under QEMU
1217 */
1218
1219 if (spapr_xive_priority_is_reserved(xive, priority)) {
1220 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
1221 " is reserved\n", priority);
1222 return H_P3;
1223 }
1224
1225 /*
1226 * Validate that "target" is part of the list of threads allocated
1227 * to the partition. For that, find the END corresponding to the
1228 * target.
1229 */
1230 if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
1231 return H_P2;
1232 }
1233
1234 assert(end_idx < xive->nr_ends);
1235 end = &xive->endt[end_idx];
1236
1237 args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
1238 if (xive_end_is_enqueue(end)) {
1239 args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
1240 } else {
1241 args[1] = 0;
1242 }
1243
1244 return H_SUCCESS;
1245 }
1246
1247 /*
1248 * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ for
1249 * a given "target" and "priority". It is also used to set the
1250 * notification config associated with the EQ. An EQ size of 0 is
1251 * used to reset the EQ config for a given target and priority. If
1252 * resetting the EQ config, the END associated with the given "target"
1253 * and "priority" will be changed to disable queueing.
1254 *
1255 * Upon return from the hcall(), no additional interrupts will be
1256 * directed to the old EQ (if one was set). The old EQ (if one was
1257 * set) should be investigated for interrupts that occurred prior to
1258 * or during the hcall().
1259 *
1260 * Parameters:
1261 * Input:
1262 * - R4: "flags"
1263 * Bits 0-62: Reserved
1264 * Bit 63: Unconditional Notify (n) per the XIVE spec
1265 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
1266 * "ibm,ppc-interrupt-gserver#s"
1267 * - R6: "priority" is a valid priority not in
1268 * "ibm,plat-res-int-priorities"
1269 * - R7: "eventQueue": The logical real address of the start of the EQ
1270 * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
1271 *
1272 * Output:
1273 * - None
1274 */
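/*
 * For illustration (hypothetical values): an OS enabling a 64K EQ
 * at the zeroed, 64K-aligned guest real address qpage for vCPU 0,
 * priority 5, with unconditional notification, would issue:
 *
 *   plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG,
 *                      SPAPR_XIVE_END_ALWAYS_NOTIFY, 0, 5, qpage, 16);
 */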
1275
1276 #define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
1277
1278 static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
1279 SpaprMachineState *spapr,
1280 target_ulong opcode,
1281 target_ulong *args)
1282 {
1283 SpaprXive *xive = spapr->xive;
1284 target_ulong flags = args[0];
1285 target_ulong target = args[1];
1286 target_ulong priority = args[2];
1287 target_ulong qpage = args[3];
1288 target_ulong qsize = args[4];
1289 XiveEND end;
1290 uint8_t end_blk, nvt_blk;
1291 uint32_t end_idx, nvt_idx;
1292
1293 trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);
1294
1295 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1296 return H_FUNCTION;
1297 }
1298
1299 if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
1300 return H_PARAMETER;
1301 }
1302
1303 /*
1304 * H_STATE should be returned if an H_INT_RESET is in progress.
1305 * This is not needed when running the emulation under QEMU
1306 */
1307
1308 if (spapr_xive_priority_is_reserved(xive, priority)) {
1309 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
1310 " is reserved\n", priority);
1311 return H_P3;
1312 }
1313
1314 /*
1315 * Validate that "target" is part of the list of threads allocated
1316 * to the partition. For that, find the END corresponding to the
1317 * target.
1318 */
1319
1320 if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
1321 return H_P2;
1322 }
1323
1324 assert(end_idx < xive->nr_ends);
1325 memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));
1326
1327 switch (qsize) {
1328 case 12:
1329 case 16:
1330 case 21:
1331 case 24:
1332 if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
1333 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
1334 " is not naturally aligned with %" HWADDR_PRIx "\n",
1335 qpage, (hwaddr)1 << qsize);
1336 return H_P4;
1337 }
1338 end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
1339 end.w3 = cpu_to_be32(qpage & 0xffffffff);
1340 end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
1341 end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
1342 break;
1343 case 0:
1344 /* reset queue and disable queueing */
1345 spapr_xive_end_reset(&end);
1346 goto out;
1347
1348 default:
1349 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
1350 qsize);
1351 return H_P5;
1352 }
1353
1354 if (qsize) {
1355 hwaddr plen = 1 << qsize;
1356 void *eq;
1357
1358 /*
1359 * Validate the guest EQ. We should also check that the queue
1360 * has been zeroed by the OS.
1361 */
1362 eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
1363 MEMTXATTRS_UNSPECIFIED);
1364 if (plen != 1 << qsize) {
1365 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
1366 HWADDR_PRIx "\n", qpage);
1367 return H_P4;
1368 }
1369 address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
1370 }
1371
1372 /* "target" should have been validated above */
1373 if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
1374 g_assert_not_reached();
1375 }
1376
1377 /*
1378 * Ensure the priority and target are correctly set (they will not
1379 * be right after allocation)
1380 */
1381 end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
1382 xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
1383 end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);
1384
1385 if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
1386 end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
1387 } else {
1388 end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
1389 }
1390
1391 /*
1392 * The generation bit for the END starts at 1 and the END page
1393 * offset counter starts at 0.
1394 */
1395 end.w1 = cpu_to_be32(END_W1_GENERATION) |
1396 xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
1397 end.w0 |= cpu_to_be32(END_W0_VALID);
1398
1399 /*
1400 * TODO: issue syncs required to ensure all in-flight interrupts
1401 * are complete on the old END
1402 */
1403
1404 out:
1405 if (spapr_xive_in_kernel(xive)) {
1406 Error *local_err = NULL;
1407
1408 kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
1409 if (local_err) {
1410 error_report_err(local_err);
1411 return H_HARDWARE;
1412 }
1413 }
1414
1415 /* Update END */
1416 memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
1417 return H_SUCCESS;
1418 }
1419
1420 /*
1421 * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
1422 * target and priority.
1423 *
1424 * Parameters:
1425 * Input:
1426 * - R4: "flags"
1427 * Bits 0-62: Reserved
1428 * Bit 63: Debug: Return debug data
1429 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
1430 * "ibm,ppc-interrupt-gserver#s"
1431 * - R6: "priority" is a valid priority not in
1432 * "ibm,plat-res-int-priorities"
1433 *
1434 * Output:
1435 * - R4: "flags":
1436 * Bits 0-61: Reserved
1437 * Bit 62: The value of Event Queue Generation Number (g) per
1438 * the XIVE spec if "Debug" = 1
1439 * Bit 63: The value of Unconditional Notify (n) per the XIVE spec
1440 * - R5: The logical real address of the start of the EQ
1441 * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
1442 * - R7: The value of Event Queue Offset Counter per XIVE spec
1443 * if "Debug" = 1, else 0
1444 *
1445 */
1446
1447 #define SPAPR_XIVE_END_DEBUG PPC_BIT(63)
1448
1449 static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
1450 SpaprMachineState *spapr,
1451 target_ulong opcode,
1452 target_ulong *args)
1453 {
1454 SpaprXive *xive = spapr->xive;
1455 target_ulong flags = args[0];
1456 target_ulong target = args[1];
1457 target_ulong priority = args[2];
1458 XiveEND *end;
1459 uint8_t end_blk;
1460 uint32_t end_idx;
1461
1462 trace_spapr_xive_get_queue_config(flags, target, priority);
1463
1464 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1465 return H_FUNCTION;
1466 }
1467
1468 if (flags & ~SPAPR_XIVE_END_DEBUG) {
1469 return H_PARAMETER;
1470 }
1471
1472 /*
1473 * H_STATE should be returned if an H_INT_RESET is in progress.
1474 * This is not needed when running the emulation under QEMU
1475 */
1476
1477 if (spapr_xive_priority_is_reserved(xive, priority)) {
1478 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
1479 " is reserved\n", priority);
1480 return H_P3;
1481 }
1482
1483 /*
1484 * Validate that "target" is part of the list of threads allocated
1485 * to the partition. For that, find the END corresponding to the
1486 * target.
1487 */
1488 if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
1489 return H_P2;
1490 }
1491
1492 assert(end_idx < xive->nr_ends);
1493 end = &xive->endt[end_idx];
1494
1495 args[0] = 0;
1496 if (xive_end_is_notify(end)) {
1497 args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
1498 }
1499
1500 if (xive_end_is_enqueue(end)) {
1501 args[1] = xive_end_qaddr(end);
1502 args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
1503 } else {
1504 args[1] = 0;
1505 args[2] = 0;
1506 }
1507
1508 if (spapr_xive_in_kernel(xive)) {
1509 Error *local_err = NULL;
1510
1511 kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
1512 if (local_err) {
1513 error_report_err(local_err);
1514 return H_HARDWARE;
1515 }
1516 }
1517
1518 /* TODO: do we need any locking on the END ? */
1519 if (flags & SPAPR_XIVE_END_DEBUG) {
1520 /* Load the event queue generation number into the return flags */
1521 args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;
1522
1523 /* Load R7 with the event queue offset counter */
1524 args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
1525 } else {
1526 args[3] = 0;
1527 }
1528
1529 return H_SUCCESS;
1530 }
1531
1532 /*
1533 * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
1534 * reporting cache line pair for the calling thread. The reporting
1535 * cache lines will contain the OS interrupt context when the OS
1536 * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
1537 * interrupt. The reporting cache lines can be reset by inputting -1
1538 * in "reportingLine". Issuing the CI store byte without reporting
1539 * cache lines registered will result in the data not being accessible
1540 * to the OS.
1541 *
1542 * Parameters:
1543 * Input:
1544 * - R4: "flags"
1545 * Bits 0-63: Reserved
1546 * - R5: "reportingLine": The logical real address of the reporting cache
1547 * line pair
1548 *
1549 * Output:
1550 * - None
1551 */
1552 static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
1553 SpaprMachineState *spapr,
1554 target_ulong opcode,
1555 target_ulong *args)
1556 {
1557 target_ulong flags = args[0];
1558
1559 trace_spapr_xive_set_os_reporting_line(flags);
1560
1561 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1562 return H_FUNCTION;
1563 }
1564
1565 /*
1566 * H_STATE should be returned if an H_INT_RESET is in progress.
1567 * This is not needed when running the emulation under QEMU
1568 */
1569
1570 /* TODO: H_INT_SET_OS_REPORTING_LINE */
1571 return H_FUNCTION;
1572 }
1573
1574 /*
1575 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
1576 * real address of the reporting cache line pair set for the input
1577 * "target". If no reporting cache line pair has been set, -1 is
1578 * returned.
1579 *
1580 * Parameters:
1581 * Input:
1582 * - R4: "flags"
1583 * Bits 0-63: Reserved
1584 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
1585 * "ibm,ppc-interrupt-gserver#s"
1586 * - R6: "reportingLine": The logical real address of the reporting
1587 * cache line pair
1588 *
1589 * Output:
1590 * - R4: The logical real address of the reporting line if set, else -1
1591 */
1592 static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
1593 SpaprMachineState *spapr,
1594 target_ulong opcode,
1595 target_ulong *args)
1596 {
1597 target_ulong flags = args[0];
1598
1599 trace_spapr_xive_get_os_reporting_line(flags);
1600
1601 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1602 return H_FUNCTION;
1603 }
1604
1605 /*
1606 * H_STATE should be returned if an H_INT_RESET is in progress.
1607 * This is not needed when running the emulation under QEMU
1608 */
1609
1610 /* TODO: H_INT_GET_OS_REPORTING_LINE */
1611 return H_FUNCTION;
1612 }
1613
1614 /*
1615 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
1616 * page for the input "lisn". This hcall is only supported for LISNs
1617 * that have the ESB hcall flag set to 1 as returned by the
1618 * H_INT_GET_SOURCE_INFO hcall().
1619 *
1620 * Parameters:
1621 * Input:
1622 * - R4: "flags"
1623 * Bits 0-62: Reserved
1624 * bit 63: Store: Store=1, store operation, else load operation
1625 * - R5: "lisn" is per "interrupts", "interrupt-map", or
1626 * "ibm,xive-lisn-ranges" properties, or as returned by the
1627 * ibm,query-interrupt-source-number RTAS call, or as
1628 * returned by the H_ALLOCATE_VAS_WINDOW hcall
1629 * - R6: "esbOffset" is the offset into the ESB page for the load or
1630 * store operation
1631 * - R7: "storeData" is the data to write for a store operation
1632 *
1633 * Output:
1634 * - R4: The value of the load if load operation, else -1
1635 */
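/*
 * For example (a sketch, assuming the XIVE_ESB_SET_PQ_00 offset
 * definition from the XIVE headers): a guest can fetch and reset
 * the PQ state of a source with a load, i.e. something like:
 *
 *   rc = plpar_hcall(H_INT_ESB, retbuf, 0, lisn,
 *                    XIVE_ESB_SET_PQ_00, 0);
 *   pq = retbuf[0];
 */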
1636
1637 #define SPAPR_XIVE_ESB_STORE PPC_BIT(63)
1638
1639 static target_ulong h_int_esb(PowerPCCPU *cpu,
1640 SpaprMachineState *spapr,
1641 target_ulong opcode,
1642 target_ulong *args)
1643 {
1644 SpaprXive *xive = spapr->xive;
1645 XiveEAS eas;
1646 target_ulong flags = args[0];
1647 target_ulong lisn = args[1];
1648 target_ulong offset = args[2];
1649 target_ulong data = args[3];
1650 hwaddr mmio_addr;
1651 XiveSource *xsrc = &xive->source;
1652
1653 trace_spapr_xive_esb(flags, lisn, offset, data);
1654
1655 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1656 return H_FUNCTION;
1657 }
1658
1659 if (flags & ~SPAPR_XIVE_ESB_STORE) {
1660 return H_PARAMETER;
1661 }
1662
1663 if (lisn >= xive->nr_irqs) {
1664 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1665 lisn);
1666 return H_P2;
1667 }
1668
1669 eas = xive->eat[lisn];
1670 if (!xive_eas_is_valid(&eas)) {
1671 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1672 lisn);
1673 return H_P2;
1674 }
1675
1676 if (offset > (1ull << xsrc->esb_shift)) {
1677 return H_P3;
1678 }
1679
1680 if (spapr_xive_in_kernel(xive)) {
1681 args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
1682 flags & SPAPR_XIVE_ESB_STORE);
1683 } else {
1684 mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;
1685
1686 if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
1687 (flags & SPAPR_XIVE_ESB_STORE))) {
1688 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
1689 HWADDR_PRIx "\n", mmio_addr);
1690 return H_HARDWARE;
1691 }
1692 args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
1693 }
1694 return H_SUCCESS;
1695 }
1696
1697 /*
1698 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
1699 * ensure any in-flight events for the input lisn are in the event
1700 * queue.
1701 *
1702 * Parameters:
1703 * Input:
1704 * - R4: "flags"
1705 * Bits 0-63: Reserved
1706 * - R5: "lisn" is per "interrupts", "interrupt-map", or
1707 * "ibm,xive-lisn-ranges" properties, or as returned by the
1708 * ibm,query-interrupt-source-number RTAS call, or as
1709 * returned by the H_ALLOCATE_VAS_WINDOW hcall
1710 *
1711 * Output:
1712 * - None
1713 */
1714 static target_ulong h_int_sync(PowerPCCPU *cpu,
1715 SpaprMachineState *spapr,
1716 target_ulong opcode,
1717 target_ulong *args)
1718 {
1719 SpaprXive *xive = spapr->xive;
1720 XiveEAS eas;
1721 target_ulong flags = args[0];
1722 target_ulong lisn = args[1];
1723
1724 trace_spapr_xive_sync(flags, lisn);
1725
1726 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1727 return H_FUNCTION;
1728 }
1729
1730 if (flags) {
1731 return H_PARAMETER;
1732 }
1733
1734 if (lisn >= xive->nr_irqs) {
1735 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
1736 lisn);
1737 return H_P2;
1738 }
1739
1740 eas = xive->eat[lisn];
1741 if (!xive_eas_is_valid(&eas)) {
1742 qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
1743 lisn);
1744 return H_P2;
1745 }
1746
1747 /*
1748 * H_STATE should be returned if an H_INT_RESET is in progress.
1749 * This is not needed when running the emulation under QEMU
1750 */
1751
1752 /*
1753 * This is not real hardware. Nothing to be done unless running
1754 * under KVM
1755 */
1756
1757 if (spapr_xive_in_kernel(xive)) {
1758 Error *local_err = NULL;
1759
1760 kvmppc_xive_sync_source(xive, lisn, &local_err);
1761 if (local_err) {
1762 error_report_err(local_err);
1763 return H_HARDWARE;
1764 }
1765 }
1766 return H_SUCCESS;
1767 }
1768
1769 /*
1770 * The H_INT_RESET hcall() is used to reset all of the partition's
1771 * interrupt exploitation structures to their initial state. This
1772 * means losing all interrupt state previously set via
1773 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
1774 *
1775 * Parameters:
1776 * Input:
1777 * - R4: "flags"
1778 * Bits 0-63: Reserved
1779 *
1780 * Output:
1781 * - None
1782 */
1783 static target_ulong h_int_reset(PowerPCCPU *cpu,
1784 SpaprMachineState *spapr,
1785 target_ulong opcode,
1786 target_ulong *args)
1787 {
1788 SpaprXive *xive = spapr->xive;
1789 target_ulong flags = args[0];
1790
1791 trace_spapr_xive_reset(flags);
1792
1793 if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
1794 return H_FUNCTION;
1795 }
1796
1797 if (flags) {
1798 return H_PARAMETER;
1799 }
1800
1801 device_legacy_reset(DEVICE(xive));
1802
1803 if (spapr_xive_in_kernel(xive)) {
1804 Error *local_err = NULL;
1805
1806 kvmppc_xive_reset(xive, &local_err);
1807 if (local_err) {
1808 error_report_err(local_err);
1809 return H_HARDWARE;
1810 }
1811 }
1812 return H_SUCCESS;
1813 }
1814
1815 void spapr_xive_hcall_init(SpaprMachineState *spapr)
1816 {
1817 spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
1818 spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
1819 spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
1820 spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
1821 spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
1822 spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
1823 spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
1824 h_int_set_os_reporting_line);
1825 spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
1826 h_int_get_os_reporting_line);
1827 spapr_register_hypercall(H_INT_ESB, h_int_esb);
1828 spapr_register_hypercall(H_INT_SYNC, h_int_sync);
1829 spapr_register_hypercall(H_INT_RESET, h_int_reset);
1830 }