/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>
/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);

static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}

static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}

static void kvm_cpu_disable_all(void)
{
    KVMEnabledCPU *enabled_cpu, *next;

    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
        QLIST_REMOVE(enabled_cpu, node);
        g_free(enabled_cpu);
    }
}

/*
 * XIVE Thread Interrupt Management context (KVM)
 */

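/*
 * The kernel exposes the per-vCPU XIVE state as a one_reg register,
 * KVM_REG_PPC_VP_STATE. Only word0 and word1 of the TIMA OS ring are
 * transferred below; the second 64-bit word of the buffer is
 * currently unused by QEMU.
 */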
void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    uint64_t state[2];
    int ret;

    /* The KVM XIVE device is not in use yet */
    if (xive->fd == -1) {
        return;
    }

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
    }
}

void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    uint64_t state[2] = { 0 };
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, errno,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];
}

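/*
 * Retrieving the OS ring state with kvmppc_xive_cpu_get_state()
 * requires the target vCPU to be available for the KVM ioctl, so
 * kvmppc_xive_cpu_synchronize_state() runs it on the vCPU itself with
 * run_on_cpu(). XiveCpuGetState carries the argument and the
 * resulting error back to the caller.
 */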
typedef struct {
    XiveTCTX *tctx;
    Error *err;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    kvmppc_xive_cpu_get_state(s->tctx, &s->err);
}

void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .err = NULL,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    if (s.err) {
        error_propagate(errp, s.err);
        return;
    }
}

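/*
 * Connect a vCPU presenter to the in-kernel XIVE device by enabling
 * the KVM_CAP_PPC_IRQ_XIVE capability on it. The kvm_enabled_cpus
 * list makes this a no-op for CPUs that were hot unplugged and then
 * replugged.
 */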
void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive;
    unsigned long vcpu_id;
    int ret;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id, 0);
    if (ret < 0) {
        error_setg(errp, "XIVE: unable to connect CPU%ld to KVM device: %s",
                   vcpu_id, strerror(errno));
        return;
    }

    kvm_cpu_enable(tctx->cs);
}

/*
 * XIVE Interrupt Source (KVM)
 */

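/*
 * Forward the configuration of a source to KVM: its (server, priority)
 * target and EISN, decoded from the EAS entry, plus the masked state,
 * all packed into one 64-bit value with the KVM_XIVE_SOURCE_* shifts
 * and masks.
 */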
void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                   Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;
    Error *local_err = NULL;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
        KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
        KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
        KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t) eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
        KVM_XIVE_SOURCE_EISN_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                      &kvm_src, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}

/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return -ENODEV;
    }

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                             true, errp);
}

static void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        Error *local_err = NULL;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        kvmppc_xive_source_reset_one(xsrc, i, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}

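/*
 * A management load returns the previous PQ state of the source in
 * its two least significant bits, hence the 0x3 mask.
 */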
static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3;
}

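/*
 * A store on the ESB trigger page notifies the source. The value
 * written (zero here) is not interpreted.
 */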
static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

    *addr = 0x0;
}

uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, 1);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, 0);
    }
}

static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /* Perform a load without side effect to retrieve the PQ bits */
        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}

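/*
 * qemu_irq handler for the KVM sources. MSIs are edge interrupts, so
 * a zero value is ignored; for LSIs, the assertion level is tracked
 * in the source status and both transitions are forwarded to the
 * kernel with the KVM_IRQ_LINE ioctl.
 */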
void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    struct kvm_irq_level args;
    int rc;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    args.irq = srcno;
    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        if (!val) {
            return;
        }
        args.level = KVM_INTERRUPT_SET;
    } else {
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
            args.level = KVM_INTERRUPT_SET_LEVEL;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
            args.level = KVM_INTERRUPT_UNSET;
        }
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args);
    if (rc < 0) {
        error_report("XIVE: kvm_irq_line() failed: %s", strerror(errno));
    }
}

/*
 * sPAPR XIVE interrupt controller (KVM)
 */
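
/*
 * Both queue config helpers below encode the (server, priority)
 * target of an END as a KVM EQ index and exchange a struct
 * kvm_ppc_xive_eq with the kernel through the
 * KVM_DEV_XIVE_GRP_EQ_CONFIG group attribute.
 */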
void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);
}

void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                  uint32_t end_idx, XiveEND *end,
                                  Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    Error *local_err = NULL;

    /*
     * Build the KVM state from the local END structure.
     */

    kvm_eq.flags = 0;
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    } else {
        kvm_eq.qshift = 0;
        kvm_eq.qaddr = 0;
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                      &kvm_eq, true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

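/*
 * Ask the in-kernel XIVE device to reset itself, using the
 * KVM_DEV_XIVE_RESET control of the device control group.
 */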
void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}

static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    Error *local_err = NULL;
    int i;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ page dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq;
            uint8_t old_pq;

            if (!xive_eas_is_valid(&xive->eat[i])) {
                continue;
            }

            pq = xive_source_esb_get(xsrc, i);
            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (a hotplug
         * event, for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notification that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return;
    }
}

void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return;
    }

    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case, otherwise we would
     * overwrite the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}

/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;

    /* The KVM XIVE device is not in use */
    if (xive->fd == -1) {
        return 0;
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, &local_err);
    if (local_err) {
        error_report_err(local_err);
        return -1;
    }

    return 0;
}

/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                     &xive->endt[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /*
         * We can only restore the source config if the source has been
         * previously set in KVM. Since we don't do that for all interrupts
         * at reset time anymore, let's do it now.
         */
        kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }

        kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /*
     * Restore the thread interrupt contexts of initial CPUs.
     *
     * The context of hotplugged CPUs is restored later, by the
     * 'post_load' handler of the XiveTCTX model because they are not
     * available at the time the SpaprXive 'post_load' method is
     * called. We cannot restore the context of all CPUs in the
     * 'post_load' handler of XiveTCTX because the machine is not
     * necessarily connected to the KVM device at that time.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    /* The source states will be restored when the machine starts running */
    return 0;
}

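/*
 * Map a region of the KVM XIVE device fd into the QEMU address
 * space. The page offset selects what is mapped (ESB or TIMA pages).
 * The 64K page shift is hardcoded for now, per the TODO note.
 */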
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
        return NULL;
    }

    return addr;
}

/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
void kvmppc_xive_connect(SpaprXive *xive, Error **errp)
{
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;
    size_t tima_len = 4ull << TM_SHIFT;
    CPUState *cs;

    /*
     * The KVM XIVE device is already in use. This is the case when
     * rebooting under the XIVE-only interrupt mode.
     */
    if (xive->fd != -1) {
        return;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return;
    }

    /* First, create the KVM XIVE device */
    xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (xive->fd < 0) {
        error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device");
        return;
    }

    /*
     * 1. Source ESB pages - KVM mapping
     */
    xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len,
                                      &local_err);
    if (local_err) {
        goto fail;
    }

    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                      "xive.esb", esb_len, xsrc->esb_mmap);
    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
                                        &xsrc->esb_mmio_kvm, 1);

    /*
     * 2. END ESB pages (No KVM support yet)
     */

    /*
     * 3. TIMA pages - KVM mapping
     */
    xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len,
                                     &local_err);
    if (local_err) {
        goto fail;
    }
    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
                                        &xive->tm_mmio_kvm, 1);

    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

    /* Connect the presenters to the initial VCPUs of the machine */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err);
        if (local_err) {
            goto fail;
        }
    }

    /* Update the KVM sources */
    kvmppc_xive_source_reset(xsrc, &local_err);
    if (local_err) {
        goto fail;
    }

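    /*
     * With the ESB and TIMA pages mapped and the vCPU presenters
     * connected, let the generic KVM code use the in-kernel irqchip
     * and route MSIs through irqfds with a direct GSI mapping.
     */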
    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;
    return;

fail:
    error_propagate(errp, local_err);
    kvmppc_xive_disconnect(xive, NULL);
}

void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp)
{
    XiveSource *xsrc;
    size_t esb_len;

    /* The KVM XIVE device is not in use */
    if (!xive || xive->fd == -1) {
        return;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return;
    }

    /* Clear the KVM mapping */
    xsrc = &xive->source;
    esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs;

    if (xsrc->esb_mmap) {
        memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
        object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
        munmap(xsrc->esb_mmap, esb_len);
        xsrc->esb_mmap = NULL;
    }

    if (xive->tm_mmap) {
        memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
        object_unparent(OBJECT(&xive->tm_mmio_kvm));
        munmap(xive->tm_mmap, 4ull << TM_SHIFT);
        xive->tm_mmap = NULL;
    }

    /*
     * When the KVM device fd is closed, the KVM device is destroyed
     * and removed from the list of devices of the VM. The VCPU
     * presenters are also detached from the device.
     */
    if (xive->fd != -1) {
        close(xive->fd);
        xive->fd = -1;
    }

    kvm_kernel_irqchip = false;
    kvm_msi_via_irqfd_allowed = false;
    kvm_gsi_direct_mapping = false;

    /* Clear the local list of presenters (hotplug) */
    kvm_cpu_disable_all();

    /* VM Change state handler is not needed anymore */
    if (xive->change) {
        qemu_del_vm_change_state_handler(xive->change);
        xive->change = NULL;
    }
}