1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
4 */
5
6#define pr_fmt(fmt) "xive-kvm: " fmt
7
8#include <linux/kernel.h>
9#include <linux/kvm_host.h>
10#include <linux/err.h>
11#include <linux/gfp.h>
12#include <linux/spinlock.h>
13#include <linux/delay.h>
14#include <linux/percpu.h>
15#include <linux/cpumask.h>
16#include <linux/uaccess.h>
17#include <asm/kvm_book3s.h>
18#include <asm/kvm_ppc.h>
19#include <asm/hvcall.h>
20#include <asm/xics.h>
21#include <asm/xive.h>
22#include <asm/xive-regs.h>
23#include <asm/debug.h>
24#include <asm/debugfs.h>
25#include <asm/time.h>
26#include <asm/opal.h>
27
28#include <linux/debugfs.h>
29#include <linux/seq_file.h>
30
31#include "book3s_xive.h"
32
33
34/*
35 * Virtual mode variants of the hcalls for use on radix/radix
36 * with AIL. They require the VCPU's VP to be "pushed"
37 *
38 * We still instantiate them here because we use some of the
39 * generated utility functions as well in this file.
40 */
41#define XIVE_RUNTIME_CHECKS
42#define X_PFX xive_vm_
43#define X_STATIC static
44#define X_STAT_PFX stat_vm_
45#define __x_tima xive_tima
46#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
47#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
48#define __x_writeb __raw_writeb
49#define __x_readw __raw_readw
50#define __x_readq __raw_readq
51#define __x_writeq __raw_writeq
52
53#include "book3s_xive_template.c"
54
55/*
56 * We leave a gap of a couple of interrupts in the queue to
57 * account for the IPI and an additional safety guard.
58 */
59#define XIVE_Q_GAP 2
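/* xive_try_pick_queue() below sizes a queue as (q->msk + 1) - XIVE_Q_GAP entries */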
60
61/*
62 * Push a vcpu's context to the XIVE on guest entry.
63 * This assumes we are in virtual mode (MMU on)
64 */
65void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
66{
67 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
68 u64 pq;
69
70 if (!tima)
71 return;
72 eieio();
73 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
74 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
75 vcpu->arch.xive_pushed = 1;
76 eieio();
77
78 /*
79 * We clear the irq_pending flag. There is a small chance of a
80 * race vs. the escalation interrupt happening on another
81 * processor setting it again, but the only consequence is to
82 * cause a spurious wakeup on the next H_CEDE, which is not an
83 * issue.
84 */
85 vcpu->arch.irq_pending = 0;
86
87 /*
88 * In single escalation mode, if the escalation interrupt is
89 * on, we mask it.
90 */
91 if (vcpu->arch.xive_esc_on) {
92 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
93 XIVE_ESB_SET_PQ_01));
94 mb();
95
96 /*
97 * We have a possible subtle race here: The escalation
98 * interrupt might have fired and be on its way to the
99 * host queue while we mask it, and if we unmask it
100 * early enough (re-cede right away), there is a
101 * theoretical possibility that it fires again, thus
102 * landing in the target queue more than once which is
103 * a big no-no.
104 *
105 * Fortunately, solving this is rather easy. If the
106 * above load setting PQ to 01 returns a previous
107 * value where P is set, then we know the escalation
108 * interrupt is somewhere on its way to the host. In
109 * that case we simply don't clear the xive_esc_on
110 * flag below. It will be eventually cleared by the
111 * handler for the escalation interrupt.
112 *
113 * Then, when doing a cede, we check that flag again
114 * before re-enabling the escalation interrupt, and if
115 * set, we abort the cede.
116 */
117 if (!(pq & XIVE_ESB_VAL_P))
118 /* Now P is 0, we can clear the flag */
119 vcpu->arch.xive_esc_on = 0;
120 }
121}
122EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
123
124/*
125 * This is a simple trigger for a generic XIVE IRQ. This must
126 * only be called for interrupts that support a trigger page
127 */
128static bool xive_irq_trigger(struct xive_irq_data *xd)
129{
130 /* This should be only for MSIs */
131 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
132 return false;
133
134 /* Those interrupts should always have a trigger page */
135 if (WARN_ON(!xd->trig_mmio))
136 return false;
137
138 out_be64(xd->trig_mmio, 0);
139
140 return true;
141}
142
143static irqreturn_t xive_esc_irq(int irq, void *data)
144{
145 struct kvm_vcpu *vcpu = data;
146
147 vcpu->arch.irq_pending = 1;
148 smp_mb();
149 if (vcpu->arch.ceded)
150 kvmppc_fast_vcpu_kick(vcpu);
151
152 /* Since we have the no-EOI flag, the interrupt is effectively
153 * disabled now. Clearing xive_esc_on means we won't bother
154 * doing so on the next entry.
155 *
156 * This also allows the entry code to know that if a PQ combination
157 * of 10 is observed while xive_esc_on is true, it means the queue
158 * contains an unprocessed escalation interrupt. We don't make use of
159 * that knowledge today but might (see comment in book3s_hv_rmhandler.S)
160 */
161 vcpu->arch.xive_esc_on = false;
162
163 return IRQ_HANDLED;
164}
165
166int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
167 bool single_escalation)
168{
169 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
170 struct xive_q *q = &xc->queues[prio];
171 char *name = NULL;
172 int rc;
173
174 /* Already there ? */
175 if (xc->esc_virq[prio])
176 return 0;
177
178 /* Hook up the escalation interrupt */
179 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
180 if (!xc->esc_virq[prio]) {
181 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
182 prio, xc->server_num);
183 return -EIO;
184 }
185
186 if (single_escalation)
187 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
188 vcpu->kvm->arch.lpid, xc->server_num);
189 else
190 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
191 vcpu->kvm->arch.lpid, xc->server_num, prio);
192 if (!name) {
193 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
194 prio, xc->server_num);
195 rc = -ENOMEM;
196 goto error;
197 }
198
199 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
200
201 rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
202 IRQF_NO_THREAD, name, vcpu);
203 if (rc) {
204 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
205 prio, xc->server_num);
206 goto error;
207 }
208 xc->esc_virq_names[prio] = name;
209
210 /* In single escalation mode, we grab the ESB MMIO of the
211 * interrupt and mask it. Also populate the VCPU v/raddr
212 * of the ESB page for use by asm entry/exit code. Finally
213 * set the XIVE_IRQ_NO_EOI flag which will prevent the
214 * core code from performing an EOI on the escalation
215 * interrupt, thus leaving it effectively masked after
216 * it fires once.
217 */
218 if (single_escalation) {
219 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
220 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
221
222 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
223 vcpu->arch.xive_esc_raddr = xd->eoi_page;
224 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
225 xd->flags |= XIVE_IRQ_NO_EOI;
226 }
227
228 return 0;
229error:
230 irq_dispose_mapping(xc->esc_virq[prio]);
231 xc->esc_virq[prio] = 0;
232 kfree(name);
233 return rc;
234}
235
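/*
 * Allocate and configure the EQ page backing queue 'prio' for this
 * vCPU's VP. Both callers (provisioning and vcpu connect) hold
 * xive->lock.
 */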
236static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
237{
238 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
239 struct kvmppc_xive *xive = xc->xive;
240 struct xive_q *q = &xc->queues[prio];
241 void *qpage;
242 int rc;
243
244 if (WARN_ON(q->qpage))
245 return 0;
246
247 /* Allocate the queue and retrieve infos on current node for now */
248 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
249 if (!qpage) {
250 pr_err("Failed to allocate queue %d for VCPU %d\n",
251 prio, xc->server_num);
252 return -ENOMEM;
253 }
254 memset(qpage, 0, 1 << xive->q_order);
255
256 /*
257 * Reconfigure the queue. This will set q->qpage only once the
258 * queue is fully configured. This is a requirement for prio 0
259 * as we will stop doing EOIs for every IPI as soon as we observe
260 * qpage being non-NULL, and instead will only EOI when we receive
261 * corresponding queue 0 entries
262 */
263 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
264 xive->q_order, true);
265 if (rc)
266 pr_err("Failed to configure queue %d for VCPU %d\n",
267 prio, xc->server_num);
268 return rc;
269}
270
271/* Called with xive->lock held */
272static int xive_check_provisioning(struct kvm *kvm, u8 prio)
273{
274 struct kvmppc_xive *xive = kvm->arch.xive;
275 struct kvm_vcpu *vcpu;
276 int i, rc;
277
278 lockdep_assert_held(&xive->lock);
279
280 /* Already provisioned ? */
281 if (xive->qmap & (1 << prio))
282 return 0;
283
284 pr_devel("Provisioning prio... %d\n", prio);
285
286 /* Provision each VCPU and enable escalations if needed */
287 kvm_for_each_vcpu(i, vcpu, kvm) {
288 if (!vcpu->arch.xive_vcpu)
289 continue;
290 rc = xive_provision_queue(vcpu, prio);
291 if (rc == 0 && !xive->single_escalation)
292 kvmppc_xive_attach_escalation(vcpu, prio,
293 xive->single_escalation);
294 if (rc)
295 return rc;
296 }
297
298 /* Order previous stores and mark it as provisioned */
299 mb();
300 xive->qmap |= (1 << prio);
301 return 0;
302}
303
304static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
305{
306 struct kvm_vcpu *vcpu;
307 struct kvmppc_xive_vcpu *xc;
308 struct xive_q *q;
309
310 /* Locate target server */
311 vcpu = kvmppc_xive_find_server(kvm, server);
312 if (!vcpu) {
313 pr_warn("%s: Can't find server %d\n", __func__, server);
314 return;
315 }
316 xc = vcpu->arch.xive_vcpu;
317 if (WARN_ON(!xc))
318 return;
319
320 q = &xc->queues[prio];
321 atomic_inc(&q->pending_count);
322}
323
324static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
325{
326 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
327 struct xive_q *q;
328 u32 max;
329
330 if (WARN_ON(!xc))
331 return -ENXIO;
332 if (!xc->valid)
333 return -ENXIO;
334
335 q = &xc->queues[prio];
336 if (WARN_ON(!q->qpage))
337 return -ENXIO;
338
339 /* Calculate max number of interrupts in that queue. */
340 max = (q->msk + 1) - XIVE_Q_GAP;
341 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
342}
343
344int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
345{
346 struct kvm_vcpu *vcpu;
347 int i, rc;
348
349 /* Locate target server */
350 vcpu = kvmppc_xive_find_server(kvm, *server);
351 if (!vcpu) {
352 pr_devel("Can't find server %d\n", *server);
353 return -EINVAL;
354 }
355
356 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
357
358 /* Try pick it */
359 rc = xive_try_pick_queue(vcpu, prio);
360 if (rc == 0)
361 return rc;
362
363 pr_devel(" .. failed, looking up candidate...\n");
364
365 /* Failed, pick another VCPU */
366 kvm_for_each_vcpu(i, vcpu, kvm) {
367 if (!vcpu->arch.xive_vcpu)
368 continue;
369 rc = xive_try_pick_queue(vcpu, prio);
370 if (rc == 0) {
371 *server = vcpu->arch.xive_vcpu->server_num;
372 pr_devel(" found on 0x%x/%d\n", *server, prio);
373 return rc;
374 }
375 }
376 pr_devel(" no available target !\n");
377
378 /* No available target ! */
379 return -EBUSY;
380}
381
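/*
 * Lock the source block and mask the interrupt at the HW level,
 * returning the previous guest priority. The ESB load setting PQ
 * to 10 also returns the prior state: P is bit 0x2 and Q is bit
 * 0x1 of the returned value, saved in old_p/old_q. The lock is
 * retried if we race with an in-progress H_EOI.
 */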
382static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
383 struct kvmppc_xive_src_block *sb,
384 struct kvmppc_xive_irq_state *state)
385{
386 struct xive_irq_data *xd;
387 u32 hw_num;
388 u8 old_prio;
389 u64 val;
390
391 /*
392 * Take the lock, set masked, try again if racing
393 * with H_EOI
394 */
395 for (;;) {
396 arch_spin_lock(&sb->lock);
397 old_prio = state->guest_priority;
398 state->guest_priority = MASKED;
399 mb();
400 if (!state->in_eoi)
401 break;
402 state->guest_priority = old_prio;
403 arch_spin_unlock(&sb->lock);
404 }
405
406 /* No change ? Bail */
407 if (old_prio == MASKED)
408 return old_prio;
409
410 /* Get the right irq */
411 kvmppc_xive_select_irq(state, &hw_num, &xd);
412
413 /*
414 * If the interrupt is marked as needing masking via
415 * firmware, we do it here. Firmware masking however
416 * is "lossy", it won't return the old p and q bits
417 * and won't set the interrupt to a state where it will
418 * record queued ones. If this is an issue we should do
419 * lazy masking instead.
420 *
421 * For now, we work around this in unmask by forcing
422 * an interrupt whenever we unmask a non-LSI via FW
423 * (if ever).
424 */
425 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
426 xive_native_configure_irq(hw_num,
427 kvmppc_xive_vp(xive, state->act_server),
428 MASKED, state->number);
429 /* set old_p so we can track if an H_EOI was done */
430 state->old_p = true;
431 state->old_q = false;
432 } else {
433 /* Set PQ to 10, return old P and old Q and remember them */
434 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
435 state->old_p = !!(val & 2);
436 state->old_q = !!(val & 1);
437
438 /*
439 * Synchronize hardware to ensure the queues are updated
440 * when masking
441 */
442 xive_native_sync_source(hw_num);
443 }
444
445 return old_prio;
446}
447
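/*
 * Take the source block lock without changing the masking state.
 * The actual unmask is done later by xive_finish_unmask() once
 * targetting has been updated.
 */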
448static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
449 struct kvmppc_xive_irq_state *state)
450{
451 /*
452 * Take the lock, try again if racing with H_EOI
453 */
454 for (;;) {
455 arch_spin_lock(&sb->lock);
456 if (!state->in_eoi)
457 break;
458 arch_spin_unlock(&sb->lock);
459 }
460}
461
462static void xive_finish_unmask(struct kvmppc_xive *xive,
463 struct kvmppc_xive_src_block *sb,
464 struct kvmppc_xive_irq_state *state,
465 u8 prio)
466{
467 struct xive_irq_data *xd;
468 u32 hw_num;
469
470 /* If we aren't changing a thing, move on */
471 if (state->guest_priority != MASKED)
472 goto bail;
473
474 /* Get the right irq */
475 kvmppc_xive_select_irq(state, &hw_num, &xd);
476
477 /*
478 * See comment in xive_lock_and_mask() concerning masking
479 * via firmware.
480 */
481 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
482 xive_native_configure_irq(hw_num,
483 kvmppc_xive_vp(xive, state->act_server),
484 state->act_priority, state->number);
485 /* If an EOI is needed, do it here */
486 if (!state->old_p)
487 xive_vm_source_eoi(hw_num, xd);
488 /* If this is not an LSI, force a trigger */
489 if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
490 xive_irq_trigger(xd);
491 goto bail;
492 }
493
494 /* Old Q set, set PQ to 11 */
495 if (state->old_q)
496 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
497
498 /*
499 * If not old P, then perform an "effective" EOI,
500 * on the source. This will handle the cases where
501 * FW EOI is needed.
502 */
503 if (!state->old_p)
504 xive_vm_source_eoi(hw_num, xd);
505
506 /* Synchronize ordering and mark unmasked */
507 mb();
508bail:
509 state->guest_priority = prio;
510}
511
512/*
513 * Target an interrupt to a given server/prio; this will fall back
514 * to another server if necessary and perform the HW targetting
515 * updates as needed
516 *
517 * NOTE: Must be called with the state lock held
518 */
519static int xive_target_interrupt(struct kvm *kvm,
520 struct kvmppc_xive_irq_state *state,
521 u32 server, u8 prio)
522{
523 struct kvmppc_xive *xive = kvm->arch.xive;
524 u32 hw_num;
525 int rc;
526
527 /*
528 * This will return a tentative server and actual
529 * priority. The count for that new target will have
530 * already been incremented.
531 */
532 rc = kvmppc_xive_select_target(kvm, &server, prio);
533
534 /*
535 * We failed to find a target ? Not much we can do
536 * at least until we support the GIQ.
537 */
538 if (rc)
539 return rc;
540
541 /*
542 * Increment the old queue pending count if there
543 * was one so that the old queue count gets adjusted later
544 * when observed to be empty.
545 */
546 if (state->act_priority != MASKED)
547 xive_inc_q_pending(kvm,
548 state->act_server,
549 state->act_priority);
550 /*
551 * Update state and HW
552 */
553 state->act_priority = prio;
554 state->act_server = server;
555
556 /* Get the right irq */
557 kvmppc_xive_select_irq(state, &hw_num, NULL);
558
559 return xive_native_configure_irq(hw_num,
560 kvmppc_xive_vp(xive, server),
561 prio, state->number);
562}
563
564/*
565 * Targetting rules: In order to avoid losing track of
566 * pending interrupts across mask and unmask, which would
567 * allow queue overflows, we implement the following rules:
568 *
569 * - Unless it was never enabled (or we run out of capacity)
570 * an interrupt is always targetted at a valid server/queue
571 * pair even when "masked" by the guest. This pair tends to
572 * be the last one used but it can be changed under some
573 * circumstances. That allows us to separate targetting
574 * from masking: we only handle accounting during (re)targetting,
575 * which also lets an interrupt drain into its target
576 * queue after masking, avoiding complex schemes to remove
577 * interrupts out of remote processor queues.
578 *
579 * - When masking, we set PQ to 10 and save the previous value
580 * of P and Q.
581 *
582 * - When unmasking, if saved Q was set, we set PQ to 11
583 * otherwise we leave PQ to the HW state which will be either
584 * 10 if nothing happened or 11 if the interrupt fired while
585 * masked. Effectively we are OR'ing the previous Q into the
586 * HW Q.
587 *
588 * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
589 * which will unmask the interrupt and shoot a new one if Q was
590 * set.
591 *
592 * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
593 * effectively meaning an H_EOI from the guest is still expected
594 * for that interrupt).
595 *
596 * - If H_EOI occurs while masked, we clear the saved P.
597 *
598 * - When changing target, we account on the new target and
599 * increment a separate "pending" counter on the old one.
600 * This pending counter will be used to decrement the old
601 * target's count when its queue has been observed empty.
602 */
603
604int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
605 u32 priority)
606{
607 struct kvmppc_xive *xive = kvm->arch.xive;
608 struct kvmppc_xive_src_block *sb;
609 struct kvmppc_xive_irq_state *state;
610 u8 new_act_prio;
611 int rc = 0;
612 u16 idx;
613
614 if (!xive)
615 return -ENODEV;
616
617 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
618 irq, server, priority);
619
620 /* First, check provisioning of queues */
621 if (priority != MASKED) {
622 mutex_lock(&xive->lock);
623 rc = xive_check_provisioning(xive->kvm,
624 xive_prio_from_guest(priority));
625 mutex_unlock(&xive->lock);
626 }
627 if (rc) {
628 pr_devel(" provisioning failure %d !\n", rc);
629 return rc;
630 }
631
632 sb = kvmppc_xive_find_source(xive, irq, &idx);
633 if (!sb)
634 return -EINVAL;
635 state = &sb->irq_state[idx];
636
637 /*
638 * We first handle masking/unmasking since the locking
639 * might need to be retried due to EOIs, we'll handle
640 * targetting changes later. These functions will return
641 * with the SB lock held.
642 *
643 * xive_lock_and_mask() will also set state->guest_priority
644 * but won't otherwise change other fields of the state.
645 *
646 * xive_lock_for_unmask will not actually unmask, this will
647 * be done later by xive_finish_unmask() once the targetting
648 * has been done, so we don't try to unmask an interrupt
649 * that hasn't yet been targetted.
650 */
651 if (priority == MASKED)
652 xive_lock_and_mask(xive, sb, state);
653 else
654 xive_lock_for_unmask(sb, state);
655
656
657 /*
658 * Then we handle targetting.
659 *
660 * First calculate a new "actual priority"
661 */
662 new_act_prio = state->act_priority;
663 if (priority != MASKED)
664 new_act_prio = xive_prio_from_guest(priority);
665
666 pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
667 new_act_prio, state->act_server, state->act_priority);
668
669 /*
670 * Then check if we actually need to change anything,
671 *
672 * The condition for re-targetting the interrupt is that
673 * we have a valid new priority (new_act_prio is not 0xff)
674 * and either the server or the priority changed.
675 *
676 * Note: If act_priority was ff and the new priority is
677 * also ff, we don't do anything and leave the interrupt
678 * untargetted. An attempt to do an int_on on an
679 * untargetted interrupt will fail. If that is a problem
680 * we could initialize interrupts with valid defaults.
681 */
682
683 if (new_act_prio != MASKED &&
684 (state->act_server != server ||
685 state->act_priority != new_act_prio))
686 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
687
688 /*
689 * Perform the final unmasking of the interrupt source
690 * if necessary
691 */
692 if (priority != MASKED)
693 xive_finish_unmask(xive, sb, state, priority);
694
695 /*
696 * Finally Update saved_priority to match. Only int_on/off
697 * set this field to a different value.
698 */
699 state->saved_priority = priority;
700
701 arch_spin_unlock(&sb->lock);
702 return rc;
703}
704
705int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
706 u32 *priority)
707{
708 struct kvmppc_xive *xive = kvm->arch.xive;
709 struct kvmppc_xive_src_block *sb;
710 struct kvmppc_xive_irq_state *state;
711 u16 idx;
712
713 if (!xive)
714 return -ENODEV;
715
716 sb = kvmppc_xive_find_source(xive, irq, &idx);
717 if (!sb)
718 return -EINVAL;
719 state = &sb->irq_state[idx];
720 arch_spin_lock(&sb->lock);
721 *server = state->act_server;
722 *priority = state->guest_priority;
723 arch_spin_unlock(&sb->lock);
724
725 return 0;
726}
727
728int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
729{
730 struct kvmppc_xive *xive = kvm->arch.xive;
731 struct kvmppc_xive_src_block *sb;
732 struct kvmppc_xive_irq_state *state;
733 u16 idx;
734
735 if (!xive)
736 return -ENODEV;
737
738 sb = kvmppc_xive_find_source(xive, irq, &idx);
739 if (!sb)
740 return -EINVAL;
741 state = &sb->irq_state[idx];
742
743 pr_devel("int_on(irq=0x%x)\n", irq);
744
745 /*
746 * Check if interrupt was not targetted
747 */
748 if (state->act_priority == MASKED) {
749 pr_devel("int_on on untargetted interrupt\n");
750 return -EINVAL;
751 }
752
753 /* If saved_priority is 0xff, do nothing */
754 if (state->saved_priority == MASKED)
755 return 0;
756
757 /*
758 * Lock and unmask it.
759 */
760 xive_lock_for_unmask(sb, state);
761 xive_finish_unmask(xive, sb, state, state->saved_priority);
762 arch_spin_unlock(&sb->lock);
763
764 return 0;
765}
766
767int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
768{
769 struct kvmppc_xive *xive = kvm->arch.xive;
770 struct kvmppc_xive_src_block *sb;
771 struct kvmppc_xive_irq_state *state;
772 u16 idx;
773
774 if (!xive)
775 return -ENODEV;
776
777 sb = kvmppc_xive_find_source(xive, irq, &idx);
778 if (!sb)
779 return -EINVAL;
780 state = &sb->irq_state[idx];
781
782 pr_devel("int_off(irq=0x%x)\n", irq);
783
784 /*
785 * Lock and mask
786 */
787 state->saved_priority = xive_lock_and_mask(xive, sb, state);
788 arch_spin_unlock(&sb->lock);
789
790 return 0;
791}
792
793static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
794{
795 struct kvmppc_xive_src_block *sb;
796 struct kvmppc_xive_irq_state *state;
797 u16 idx;
798
799 sb = kvmppc_xive_find_source(xive, irq, &idx);
800 if (!sb)
801 return false;
802 state = &sb->irq_state[idx];
803 if (!state->valid)
804 return false;
805
806 /*
807 * Trigger the IPI. This assumes we never restore a pass-through
808 * interrupt which should be safe enough
809 */
810 xive_irq_trigger(&state->ipi_data);
811
812 return true;
813}
814
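/*
 * Assemble the XICS ICP one_reg value from the XIVE state: CPPR and
 * MFRR come from the vcpu's xive state, and pending_pri is reported
 * as 0xff (no pending interrupt recorded here).
 */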
815u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
816{
817 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
818
819 if (!xc)
820 return 0;
821
822 /* Return the per-cpu state for state saving/migration */
823 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
824 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
825 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
826}
827
828int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
829{
830 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
831 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
832 u8 cppr, mfrr;
833 u32 xisr;
834
835 if (!xc || !xive)
836 return -ENOENT;
837
838 /* Grab individual state fields. We don't use pending_pri */
839 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
840 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
841 KVM_REG_PPC_ICP_XISR_MASK;
842 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
843
844 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
845 xc->server_num, cppr, mfrr, xisr);
846
847 /*
848 * We can't update the state of a "pushed" VCPU, but that
849 * shouldn't happen because the vcpu->mutex makes running a
850 * vcpu mutually exclusive with doing one_reg get/set on it.
851 */
852 if (WARN_ON(vcpu->arch.xive_pushed))
853 return -EIO;
854
855 /* Update VCPU HW saved state */
856 vcpu->arch.xive_saved_state.cppr = cppr;
857 xc->hw_cppr = xc->cppr = cppr;
858
859 /*
860 * Update MFRR state. If it's not 0xff, we mark the VCPU as
861 * having a pending MFRR change, which will re-evaluate the
862 * target. The VCPU will thus potentially get a spurious
863 * interrupt but that's not a big deal.
864 */
865 xc->mfrr = mfrr;
866 if (mfrr < cppr)
867 xive_irq_trigger(&xc->vp_ipi_data);
868
869 /*
870 * Now saved XIRR is "interesting". It means there's something in
871 * the legacy "1 element" queue... for an IPI we simply ignore it,
872 * as the MFRR restore will handle that. For anything else we need
873 * to force a resend of the source.
874 * However the source may not have been setup yet. If that's the
875 * case, we keep that info and increment a counter in the xive to
876 * tell subsequent xive_set_source() to go look.
877 */
878 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
879 xc->delayed_irq = xisr;
880 xive->delayed_irqs++;
881 pr_devel(" xisr restore delayed\n");
882 }
883
884 return 0;
885}
886
887int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
888 struct irq_desc *host_desc)
889{
890 struct kvmppc_xive *xive = kvm->arch.xive;
891 struct kvmppc_xive_src_block *sb;
892 struct kvmppc_xive_irq_state *state;
893 struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
894 unsigned int host_irq = irq_desc_get_irq(host_desc);
895 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
896 u16 idx;
897 u8 prio;
898 int rc;
899
900 if (!xive)
901 return -ENODEV;
902
903 pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq);
904
905 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
906 if (!sb)
907 return -EINVAL;
908 state = &sb->irq_state[idx];
909
910 /*
911 * Mark the passed-through interrupt as going to a VCPU,
912 * this will prevent further EOIs and similar operations
913 * from the XIVE code. It will also mask the interrupt
914 * to either PQ=10 or 11 state, the latter if the interrupt
915 * is pending. This will allow us to unmask or retrigger it
916 * after routing it to the guest with a simple EOI.
917 *
918 * The "state" argument is a "token", all it needs is to be
919 * non-NULL to switch to passed-through or NULL for the
920 * other way around. We may not yet have an actual VCPU
921 * target here and we don't really care.
922 */
923 rc = irq_set_vcpu_affinity(host_irq, state);
924 if (rc) {
925 pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
926 return rc;
927 }
928
929 /*
930 * Mask and read state of IPI. We need to know if its P bit
931 * is set as that means it's potentially already using a
932 * queue entry in the target
933 */
934 prio = xive_lock_and_mask(xive, sb, state);
935 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
936 state->old_p, state->old_q);
937
938 /* Turn the IPI hard off */
939 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
940
941 /*
942 * Reset ESB guest mapping. Needed when ESB pages are exposed
943 * to the guest in XIVE native mode
944 */
945 if (xive->ops && xive->ops->reset_mapped)
946 xive->ops->reset_mapped(kvm, guest_irq);
947
948 /* Grab info about irq */
949 state->pt_number = hw_irq;
950 state->pt_data = irq_data_get_irq_handler_data(host_data);
951
952 /*
953 * Configure the IRQ to match the existing configuration of
954 * the IPI if it was already targetted. Otherwise this will
955 * mask the interrupt in a lossy way (act_priority is 0xff)
956 * which is fine for a never started interrupt.
957 */
958 xive_native_configure_irq(hw_irq,
959 kvmppc_xive_vp(xive, state->act_server),
960 state->act_priority, state->number);
961
962 /*
963 * We do an EOI to enable the interrupt (and retrigger if needed)
964 * if the guest has the interrupt unmasked and the P bit was *not*
965 * set in the IPI. If it was set, we know a slot may still be in
966 * use in the target queue thus we have to wait for a guest
967 * originated EOI
968 */
969 if (prio != MASKED && !state->old_p)
970 xive_vm_source_eoi(hw_irq, state->pt_data);
971
972 /* Clear old_p/old_q as they are no longer relevant */
973 state->old_p = state->old_q = false;
974
975 /* Restore guest prio (unlocks EOI) */
976 mb();
977 state->guest_priority = prio;
978 arch_spin_unlock(&sb->lock);
979
980 return 0;
981}
982EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
983
984int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
985 struct irq_desc *host_desc)
986{
987 struct kvmppc_xive *xive = kvm->arch.xive;
988 struct kvmppc_xive_src_block *sb;
989 struct kvmppc_xive_irq_state *state;
990 unsigned int host_irq = irq_desc_get_irq(host_desc);
991 u16 idx;
992 u8 prio;
993 int rc;
994
995 if (!xive)
996 return -ENODEV;
997
998 pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
999
1000 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1001 if (!sb)
1002 return -EINVAL;
1003 state = &sb->irq_state[idx];
1004
1005 /*
1006 * Mask and read state of IRQ. We need to know if its P bit
1007 * is set as that means it's potentially already using a
1008 * queue entry in the target
1009 */
1010 prio = xive_lock_and_mask(xive, sb, state);
1011 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
1012 state->old_p, state->old_q);
1013
1014 /*
1015 * If old_p is set, the interrupt is pending, we switch it to
1016 * PQ=11. This will force a resend in the host so the interrupt
1017 * isn't lost to whatever host driver may pick it up
1018 */
1019 if (state->old_p)
1020 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
1021
1022 /* Release the passed-through interrupt to the host */
1023 rc = irq_set_vcpu_affinity(host_irq, NULL);
1024 if (rc) {
1025 pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
1026 return rc;
1027 }
1028
1029 /* Forget about the IRQ */
1030 state->pt_number = 0;
1031 state->pt_data = NULL;
1032
1033 /*
1034 * Reset ESB guest mapping. Needed when ESB pages are exposed
1035 * to the guest in XIVE native mode
1036 */
1037 if (xive->ops && xive->ops->reset_mapped) {
1038 xive->ops->reset_mapped(kvm, guest_irq);
1039 }
1040
1041 /* Reconfigure the IPI */
1042 xive_native_configure_irq(state->ipi_number,
1043 kvmppc_xive_vp(xive, state->act_server),
1044 state->act_priority, state->number);
1045
1046 /*
1047 * If old_p is set (we have a queue entry potentially
1048 * occupied) or the interrupt is masked, we set the IPI
1049 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
1050 */
1051 if (prio == MASKED || state->old_p)
1052 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
1053 else
1054 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
1055
1056 /* Restore guest prio (unlocks EOI) */
1057 mb();
1058 state->guest_priority = prio;
1059 arch_spin_unlock(&sb->lock);
1060
1061 return 0;
1062}
1063EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
1064
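/*
 * Detarget and mask every source still routed to this vCPU and turn
 * off its escalation interrupt. Used on the vCPU cleanup path.
 */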
1065void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
1066{
1067 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1068 struct kvm *kvm = vcpu->kvm;
1069 struct kvmppc_xive *xive = kvm->arch.xive;
1070 int i, j;
1071
1072 for (i = 0; i <= xive->max_sbid; i++) {
1073 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1074
1075 if (!sb)
1076 continue;
1077 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
1078 struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
1079
1080 if (!state->valid)
1081 continue;
1082 if (state->act_priority == MASKED)
1083 continue;
1084 if (state->act_server != xc->server_num)
1085 continue;
1086
1087 /* Clean it up */
1088 arch_spin_lock(&sb->lock);
1089 state->act_priority = MASKED;
1090 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1091 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1092 if (state->pt_number) {
1093 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1094 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1095 }
1096 arch_spin_unlock(&sb->lock);
1097 }
1098 }
1099
1100 /* Disable vcpu's escalation interrupt */
1101 if (vcpu->arch.xive_esc_on) {
1102 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
1103 XIVE_ESB_SET_PQ_01));
1104 vcpu->arch.xive_esc_on = false;
1105 }
1106
1107 /*
1108 * Clear pointers to escalation interrupt ESB.
1109 * This is safe because the vcpu->mutex is held, preventing
1110 * any other CPU from concurrently executing a KVM_RUN ioctl.
1111 */
1112 vcpu->arch.xive_esc_vaddr = 0;
1113 vcpu->arch.xive_esc_raddr = 0;
1114}
1115
1116void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1117{
1118 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1119 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1120 int i;
1121
1122 if (!kvmppc_xics_enabled(vcpu))
1123 return;
1124
1125 if (!xc)
1126 return;
1127
1128 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1129
1130 /* Ensure no interrupt is still routed to that VP */
1131 xc->valid = false;
1132 kvmppc_xive_disable_vcpu_interrupts(vcpu);
1133
1134 /* Mask the VP IPI */
1135 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1136
1137 /* Disable the VP */
1138 xive_native_disable_vp(xc->vp_id);
1139
1140 /* Free the queues & associated interrupts */
1141 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1142 struct xive_q *q = &xc->queues[i];
1143
1144 /* Free the escalation irq */
1145 if (xc->esc_virq[i]) {
1146 free_irq(xc->esc_virq[i], vcpu);
1147 irq_dispose_mapping(xc->esc_virq[i]);
1148 kfree(xc->esc_virq_names[i]);
1149 }
1150 /* Free the queue */
1151 xive_native_disable_queue(xc->vp_id, q, i);
1152 if (q->qpage) {
1153 free_pages((unsigned long)q->qpage,
1154 xive->q_page_order);
1155 q->qpage = NULL;
1156 }
1157 }
1158
1159 /* Free the IPI */
1160 if (xc->vp_ipi) {
1161 xive_cleanup_irq_data(&xc->vp_ipi_data);
1162 xive_native_free_irq(xc->vp_ipi);
1163 }
1164 /* Free the VP */
1165 kfree(xc);
1166
1167 /* Cleanup the vcpu */
1168 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1169 vcpu->arch.xive_vcpu = NULL;
1170}
1171
1172int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1173 struct kvm_vcpu *vcpu, u32 cpu)
1174{
1175 struct kvmppc_xive *xive = dev->private;
1176 struct kvmppc_xive_vcpu *xc;
1177 int i, r = -EBUSY;
1178
1179 pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1180
1181 if (dev->ops != &kvm_xive_ops) {
1182 pr_devel("Wrong ops !\n");
1183 return -EPERM;
1184 }
1185 if (xive->kvm != vcpu->kvm)
1186 return -EPERM;
1187 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1188 return -EBUSY;
1189 if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
1190 pr_devel("Duplicate !\n");
1191 return -EEXIST;
1192 }
1193 if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
1194 pr_devel("Out of bounds !\n");
1195 return -EINVAL;
1196 }
1197 xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1198 if (!xc)
1199 return -ENOMEM;
1200
1201 /* We need to synchronize with queue provisioning */
1202 mutex_lock(&xive->lock);
1203 vcpu->arch.xive_vcpu = xc;
1204 xc->xive = xive;
1205 xc->vcpu = vcpu;
1206 xc->server_num = cpu;
1207 xc->vp_id = kvmppc_xive_vp(xive, cpu);
1208 xc->mfrr = 0xff;
1209 xc->valid = true;
1210
1211 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1212 if (r)
1213 goto bail;
1214
1215 /* Configure VCPU fields for use by assembly push/pull */
1216 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1217 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1218
1219 /* Allocate IPI */
1220 xc->vp_ipi = xive_native_alloc_irq();
1221 if (!xc->vp_ipi) {
1222 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1223 r = -EIO;
1224 goto bail;
1225 }
1226 pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1227
1228 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1229 if (r)
1230 goto bail;
1231
1232 /*
1233 * Enable the VP first as the single escalation mode will
1234 * affect escalation interrupts numbering
1235 */
1236 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1237 if (r) {
1238 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1239 goto bail;
1240 }
1241
1242 /*
1243 * Initialize queues. Initially we set them all for no queueing
1244 * and we enable escalation for queue 0 only which we'll use for
1245 * our mfrr change notifications. If the VCPU is hot-plugged, we
1246 * do handle provisioning however based on the existing "map"
1247 * of enabled queues.
1248 */
1249 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1250 struct xive_q *q = &xc->queues[i];
1251
1252 /* Single escalation, no queue 7 */
1253 if (i == 7 && xive->single_escalation)
1254 break;
1255
1256 /* Is queue already enabled ? Provision it */
1257 if (xive->qmap & (1 << i)) {
1258 r = xive_provision_queue(vcpu, i);
1259 if (r == 0 && !xive->single_escalation)
1260 kvmppc_xive_attach_escalation(
1261 vcpu, i, xive->single_escalation);
1262 if (r)
1263 goto bail;
1264 } else {
1265 r = xive_native_configure_queue(xc->vp_id,
1266 q, i, NULL, 0, true);
1267 if (r) {
1268 pr_err("Failed to configure queue %d for VCPU %d\n",
1269 i, cpu);
1270 goto bail;
1271 }
1272 }
1273 }
1274
1275 /* If not done above, attach priority 0 escalation */
1276 r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1277 if (r)
1278 goto bail;
1279
1280 /* Route the IPI */
1281 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1282 if (!r)
1283 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1284
1285bail:
1286 mutex_unlock(&xive->lock);
1287 if (r) {
1288 kvmppc_xive_cleanup_vcpu(vcpu);
1289 return r;
1290 }
1291
1292 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1293 return 0;
1294}
1295
1296/*
1297 * Scanning of queues before/after migration save
1298 */
1299static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1300{
1301 struct kvmppc_xive_src_block *sb;
1302 struct kvmppc_xive_irq_state *state;
1303 u16 idx;
1304
1305 sb = kvmppc_xive_find_source(xive, irq, &idx);
1306 if (!sb)
1307 return;
1308
1309 state = &sb->irq_state[idx];
1310
1311 /* Some sanity checking */
1312 if (!state->valid) {
1313 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1314 return;
1315 }
1316
1317 /*
1318 * If the interrupt is in a queue it should have P set.
1319 * We warn so that it gets reported. A backtrace isn't useful
1320 * so no need to use a WARN_ON.
1321 */
1322 if (!state->saved_p)
1323 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1324
1325 /* Set flag */
1326 state->in_queue = true;
1327}
1328
1329static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1330 struct kvmppc_xive_src_block *sb,
1331 u32 irq)
1332{
1333 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1334
1335 if (!state->valid)
1336 return;
1337
1338 /* Mask and save state, this will also sync HW queues */
1339 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1340
1341 /* Transfer P and Q */
1342 state->saved_p = state->old_p;
1343 state->saved_q = state->old_q;
1344
1345 /* Unlock */
1346 arch_spin_unlock(&sb->lock);
1347}
1348
1349static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1350 struct kvmppc_xive_src_block *sb,
1351 u32 irq)
1352{
1353 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1354
1355 if (!state->valid)
1356 return;
1357
1358 /*
1359 * Lock / exclude EOI (not technically necessary if the
1360 * guest isn't running concurrently. If this becomes a
1361 * performance issue we can probably remove the lock.
1362 */
1363 xive_lock_for_unmask(sb, state);
1364
1365 /* Restore mask/prio if it wasn't masked */
1366 if (state->saved_scan_prio != MASKED)
1367 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1368
1369 /* Unlock */
1370 arch_spin_unlock(&sb->lock);
1371}
1372
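/*
 * Walk one queue page and mark every non-IPI interrupt found in it
 * as "in_queue" so that xive_get_source() reports it as pending.
 */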
1373static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1374{
1375 u32 idx = q->idx;
1376 u32 toggle = q->toggle;
1377 u32 irq;
1378
1379 do {
1380 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1381 if (irq > XICS_IPI)
1382 xive_pre_save_set_queued(xive, irq);
1383 } while(irq);
1384}
1385
1386static void xive_pre_save_scan(struct kvmppc_xive *xive)
1387{
1388 struct kvm_vcpu *vcpu = NULL;
1389 int i, j;
1390
1391 /*
1392 * See comment in xive_get_source() about how this
1393 * works. Collect a stable state for all interrupts
1394 */
1395 for (i = 0; i <= xive->max_sbid; i++) {
1396 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1397 if (!sb)
1398 continue;
1399 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1400 xive_pre_save_mask_irq(xive, sb, j);
1401 }
1402
1403 /* Then scan the queues and update the "in_queue" flag */
1404 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1405 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1406 if (!xc)
1407 continue;
1408 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1409 if (xc->queues[j].qpage)
1410 xive_pre_save_queue(xive, &xc->queues[j]);
1411 }
1412 }
1413
1414 /* Finally restore interrupt states */
1415 for (i = 0; i <= xive->max_sbid; i++) {
1416 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1417 if (!sb)
1418 continue;
1419 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1420 xive_pre_save_unmask_irq(xive, sb, j);
1421 }
1422}
1423
1424static void xive_post_save_scan(struct kvmppc_xive *xive)
1425{
1426 u32 i, j;
1427
1428 /* Clear all the in_queue flags */
1429 for (i = 0; i <= xive->max_sbid; i++) {
1430 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1431 if (!sb)
1432 continue;
1433 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1434 sb->irq_state[j].in_queue = false;
1435 }
1436
1437 /* Next get_source() will do a new scan */
1438 xive->saved_src_count = 0;
1439}
1440
1441/*
1442 * This returns the source configuration and state to user space.
1443 */
1444static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1445{
1446 struct kvmppc_xive_src_block *sb;
1447 struct kvmppc_xive_irq_state *state;
1448 u64 __user *ubufp = (u64 __user *) addr;
1449 u64 val, prio;
1450 u16 idx;
1451
1452 sb = kvmppc_xive_find_source(xive, irq, &idx);
1453 if (!sb)
1454 return -ENOENT;
1455
1456 state = &sb->irq_state[idx];
1457
1458 if (!state->valid)
1459 return -ENOENT;
1460
1461 pr_devel("get_source(%ld)...\n", irq);
1462
1463 /*
1464 * So to properly save the state into something that looks like a
1465 * XICS migration stream we cannot treat interrupts individually.
1466 *
1467 * We need, instead, mask them all (& save their previous PQ state)
1468 * to get a stable state in the HW, then sync them to ensure that
1469 * any interrupt that had already fired hits its queue, and finally
1470 * scan all the queues to collect which interrupts are still present
1471 * in the queues, so we can set the "pending" flag on them and
1472 * they can be resent on restore.
1473 *
1474 * So we do it all when the "first" interrupt gets saved, all the
1475 * state is collected at that point, the rest of xive_get_source()
1476 * will merely collect and convert that state to the expected
1477 * userspace bit mask.
1478 */
1479 if (xive->saved_src_count == 0)
1480 xive_pre_save_scan(xive);
1481 xive->saved_src_count++;
1482
1483 /* Convert saved state into something compatible with xics */
1484 val = state->act_server;
1485 prio = state->saved_scan_prio;
1486
1487 if (prio == MASKED) {
1488 val |= KVM_XICS_MASKED;
1489 prio = state->saved_priority;
1490 }
1491 val |= prio << KVM_XICS_PRIORITY_SHIFT;
1492 if (state->lsi) {
1493 val |= KVM_XICS_LEVEL_SENSITIVE;
1494 if (state->saved_p)
1495 val |= KVM_XICS_PENDING;
1496 } else {
1497 if (state->saved_p)
1498 val |= KVM_XICS_PRESENTED;
1499
1500 if (state->saved_q)
1501 val |= KVM_XICS_QUEUED;
1502
1503 /*
1504 * We mark it pending (which will attempt a re-delivery)
1505 * if we are in a queue *or* we were masked and had
1506 * Q set which is equivalent to the XICS "masked pending"
1507 * state
1508 */
1509 if (state->in_queue || (prio == MASKED && state->saved_q))
1510 val |= KVM_XICS_PENDING;
1511 }
1512
1513 /*
1514 * If that was the last interrupt saved, reset the
1515 * in_queue flags
1516 */
1517 if (xive->saved_src_count == xive->src_count)
1518 xive_post_save_scan(xive);
1519
1520 /* Copy the result to userspace */
1521 if (put_user(val, ubufp))
1522 return -EFAULT;
1523
1524 return 0;
1525}
1526
1527struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1528 struct kvmppc_xive *xive, int irq)
1529{
1530 struct kvmppc_xive_src_block *sb;
1531 int i, bid;
1532
1533 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1534
1535 mutex_lock(&xive->lock);
1536
1537 /* block already exists - somebody else got here first */
1538 if (xive->src_blocks[bid])
1539 goto out;
1540
1541 /* Create the ICS */
1542 sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1543 if (!sb)
1544 goto out;
1545
1546 sb->id = bid;
1547
1548 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1549 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1550 sb->irq_state[i].eisn = 0;
1551 sb->irq_state[i].guest_priority = MASKED;
1552 sb->irq_state[i].saved_priority = MASKED;
1553 sb->irq_state[i].act_priority = MASKED;
1554 }
1555 smp_wmb();
1556 xive->src_blocks[bid] = sb;
1557
1558 if (bid > xive->max_sbid)
1559 xive->max_sbid = bid;
1560
1561out:
1562 mutex_unlock(&xive->lock);
1563 return xive->src_blocks[bid];
1564}
1565
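/*
 * Check whether 'irq' was stashed as a delayed irq by set_icp()
 * (a saved XIRR pointing at a not-yet-restored source). If so,
 * clear it so xive_set_source() can force the PENDING state.
 */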
1566static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1567{
1568 struct kvm *kvm = xive->kvm;
1569 struct kvm_vcpu *vcpu = NULL;
1570 int i;
1571
1572 kvm_for_each_vcpu(i, vcpu, kvm) {
1573 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1574
1575 if (!xc)
1576 continue;
1577
1578 if (xc->delayed_irq == irq) {
1579 xc->delayed_irq = 0;
1580 xive->delayed_irqs--;
1581 return true;
1582 }
1583 }
1584 return false;
1585}
1586
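/*
 * Restore one interrupt source from the XICS-format word passed by
 * userspace via KVM_DEV_XICS_GRP_SOURCES, creating the source block
 * and backing IPI on first use.
 */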
1587static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1588{
1589 struct kvmppc_xive_src_block *sb;
1590 struct kvmppc_xive_irq_state *state;
1591 u64 __user *ubufp = (u64 __user *) addr;
1592 u16 idx;
1593 u64 val;
1594 u8 act_prio, guest_prio;
1595 u32 server;
1596 int rc = 0;
1597
1598 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1599 return -ENOENT;
1600
1601 pr_devel("set_source(irq=0x%lx)\n", irq);
1602
1603 /* Find the source */
1604 sb = kvmppc_xive_find_source(xive, irq, &idx);
1605 if (!sb) {
1606 pr_devel("No source, creating source block...\n");
1607 sb = kvmppc_xive_create_src_block(xive, irq);
1608 if (!sb) {
1609 pr_devel("Failed to create block...\n");
1610 return -ENOMEM;
1611 }
1612 }
1613 state = &sb->irq_state[idx];
1614
1615 /* Read user passed data */
1616 if (get_user(val, ubufp)) {
1617 pr_devel("fault getting user info !\n");
1618 return -EFAULT;
1619 }
1620
1621 server = val & KVM_XICS_DESTINATION_MASK;
1622 guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1623
1624 pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n",
1625 val, server, guest_prio);
1626
1627 /*
1628 * If the source doesn't already have an IPI, allocate
1629 * one and get the corresponding data
1630 */
1631 if (!state->ipi_number) {
1632 state->ipi_number = xive_native_alloc_irq();
1633 if (state->ipi_number == 0) {
1634 pr_devel("Failed to allocate IPI !\n");
1635 return -ENOMEM;
1636 }
1637 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1638 pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1639 }
1640
1641 /*
1642 * We use lock_and_mask() to set us in the right masked
1643 * state. We will override that state from the saved state
1644 * further down, but this will handle the cases of interrupts
1645 * that need FW masking. We set the initial guest_priority to
1646 * 0 before calling it to ensure it actually performs the masking.
1647 */
1648 state->guest_priority = 0;
1649 xive_lock_and_mask(xive, sb, state);
1650
1651 /*
1652 * Now, we select a target if we have one. If we don't we
1653 * leave the interrupt untargetted. It means that an interrupt
1654 * can become "untargetted" across migration if it was masked
1655 * by set_xive() but there is little we can do about it.
1656 */
1657
1658 /* First convert prio and mark interrupt as untargetted */
1659 act_prio = xive_prio_from_guest(guest_prio);
1660 state->act_priority = MASKED;
1661
1662 /*
1663 * We need to drop the lock due to the mutex below. Hopefully
1664 * nothing is touching that interrupt yet since it hasn't been
1665 * advertised to a running guest yet
1666 */
1667 arch_spin_unlock(&sb->lock);
1668
1669 /* If we have a priority target the interrupt */
1670 if (act_prio != MASKED) {
1671 /* First, check provisioning of queues */
1672 mutex_lock(&xive->lock);
1673 rc = xive_check_provisioning(xive->kvm, act_prio);
1674 mutex_unlock(&xive->lock);
1675
1676 /* Target interrupt */
1677 if (rc == 0)
1678 rc = xive_target_interrupt(xive->kvm, state,
1679 server, act_prio);
1680 /*
1681 * If provisioning or targetting failed, leave it
1682 * alone and masked. It will remain disabled until
1683 * the guest re-targets it.
1684 */
1685 }
1686
1687 /*
1688 * Find out if this was a delayed irq stashed in an ICP,
1689 * in which case, treat it as pending
1690 */
1691 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1692 val |= KVM_XICS_PENDING;
1693 pr_devel(" Found delayed ! forcing PENDING !\n");
1694 }
1695
1696 /* Cleanup the SW state */
1697 state->old_p = false;
1698 state->old_q = false;
1699 state->lsi = false;
1700 state->asserted = false;
1701
1702 /* Restore LSI state */
1703 if (val & KVM_XICS_LEVEL_SENSITIVE) {
1704 state->lsi = true;
1705 if (val & KVM_XICS_PENDING)
1706 state->asserted = true;
1707 pr_devel(" LSI ! Asserted=%d\n", state->asserted);
1708 }
1709
1710 /*
1711 * Restore P and Q. If the interrupt was pending, we
1712 * force Q and !P, which will trigger a resend.
1713 *
1714 * That means that a guest that had both an interrupt
1715 * pending (queued) and Q set will restore with only
1716 * one instance of that interrupt instead of 2, but that
1717 * is perfectly fine as coalescing interrupts that haven't
1718 * been presented yet is always allowed.
1719 */
1720 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1721 state->old_p = true;
1722 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1723 state->old_q = true;
1724
1725 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
1726
1727 /*
1728 * If the interrupt was unmasked, update guest priority and
1729 * perform the appropriate state transition and do a
1730 * re-trigger if necessary.
1731 */
1732 if (val & KVM_XICS_MASKED) {
1733 pr_devel(" masked, saving prio\n");
1734 state->guest_priority = MASKED;
1735 state->saved_priority = guest_prio;
1736 } else {
1737 pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
1738 xive_finish_unmask(xive, sb, state, guest_prio);
1739 state->saved_priority = guest_prio;
1740 }
1741
1742 /* Increment the number of valid sources and mark this one valid */
1743 if (!state->valid)
1744 xive->src_count++;
1745 state->valid = true;
1746
1747 return 0;
1748}
1749
1750int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1751 bool line_status)
1752{
1753 struct kvmppc_xive *xive = kvm->arch.xive;
1754 struct kvmppc_xive_src_block *sb;
1755 struct kvmppc_xive_irq_state *state;
1756 u16 idx;
1757
1758 if (!xive)
1759 return -ENODEV;
1760
1761 sb = kvmppc_xive_find_source(xive, irq, &idx);
1762 if (!sb)
1763 return -EINVAL;
1764
1765 /* Perform locklessly .... (we need to do some RCUisms here...) */
1766 state = &sb->irq_state[idx];
1767 if (!state->valid)
1768 return -EINVAL;
1769
1770 /* We don't allow a trigger on a passed-through interrupt */
1771 if (state->pt_number)
1772 return -EINVAL;
1773
1774 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1775 state->asserted = 1;
1776 else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1777 state->asserted = 0;
1778 return 0;
1779 }
1780
1781 /* Trigger the IPI */
1782 xive_irq_trigger(&state->ipi_data);
1783
1784 return 0;
1785}
1786
1787static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1788{
1789 struct kvmppc_xive *xive = dev->private;
1790
1791 /* We honor the existing XICS ioctl */
1792 switch (attr->group) {
1793 case KVM_DEV_XICS_GRP_SOURCES:
1794 return xive_set_source(xive, attr->attr, attr->addr);
1795 }
1796 return -ENXIO;
1797}
1798
1799static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1800{
1801 struct kvmppc_xive *xive = dev->private;
1802
1803 /* We honor the existing XICS ioctl */
1804 switch (attr->group) {
1805 case KVM_DEV_XICS_GRP_SOURCES:
1806 return xive_get_source(xive, attr->attr, attr->addr);
1807 }
1808 return -ENXIO;
1809}
1810
1811static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1812{
1813 /* We honor the same limits as XICS, at least for now */
1814 switch (attr->group) {
1815 case KVM_DEV_XICS_GRP_SOURCES:
1816 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1817 attr->attr < KVMPPC_XICS_NR_IRQS)
1818 return 0;
1819 break;
1820 }
1821 return -ENXIO;
1822}
1823
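/* Mask the source (PQ=01) and detarget it at the HW level */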
1824static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1825{
1826 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1827 xive_native_configure_irq(hw_num, 0, MASKED, 0);
1828}
1829
1830void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1831{
1832 int i;
1833
1834 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1835 struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1836
1837 if (!state->valid)
1838 continue;
1839
1840 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1841 xive_cleanup_irq_data(&state->ipi_data);
1842 xive_native_free_irq(state->ipi_number);
1843
1844 /* Pass-through, cleanup too but keep IRQ hw data */
1845 if (state->pt_number)
1846 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1847
1848 state->valid = false;
1849 }
1850}
1851
1852/*
1853 * Called when device fd is closed. kvm->lock is held.
1854 */
1855static void kvmppc_xive_release(struct kvm_device *dev)
1856{
1857 struct kvmppc_xive *xive = dev->private;
1858 struct kvm *kvm = xive->kvm;
1859 struct kvm_vcpu *vcpu;
1860 int i;
1861
1862 pr_devel("Releasing xive device\n");
1863
1864 /*
1865 * Since this is the device release function, we know that
1866 * userspace does not have any open fd referring to the
1867 * device. Therefore there can not be any of the device
1868 * attribute set/get functions being executed concurrently,
1869 * and similarly, the connect_vcpu and set/clr_mapped
1870 * functions also cannot be being executed.
5422e951 1871 */
1872
1873 debugfs_remove(xive->dentry);
1874
1875 /*
1876 * We should clean up the vCPU interrupt presenters first.
1877 */
1878 kvm_for_each_vcpu(i, vcpu, kvm) {
1879 /*
1880 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
1881 * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
1882 * Holding the vcpu->mutex also means that the vcpu cannot
1883 * be executing the KVM_RUN ioctl, and therefore it cannot
1884 * be executing the XIVE push or pull code or accessing
1885 * the XIVE MMIO regions.
1886 */
1887 mutex_lock(&vcpu->mutex);
1888 kvmppc_xive_cleanup_vcpu(vcpu);
1889 mutex_unlock(&vcpu->mutex);
1890 }
5af50993 1891
1892 /*
1893 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
1894 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
1895 * against xive code getting called during vcpu execution or
1896 * set/get one_reg operations.
1897 */
6f868405 1898 kvm->arch.xive = NULL;
1899
1900 /* Mask and free interrupts */
1901 for (i = 0; i <= xive->max_sbid; i++) {
1902 if (xive->src_blocks[i])
1903 kvmppc_xive_free_sources(xive->src_blocks[i]);
1904 kfree(xive->src_blocks[i]);
1905 xive->src_blocks[i] = NULL;
1906 }
1907
1908 if (xive->vp_base != XIVE_INVALID_VP)
1909 xive_native_free_vp_block(xive->vp_base);
1910
1911 /*
1912 * A reference to the kvmppc_xive pointer is now kept under
1913 * the xive_devices struct of the machine for reuse. For now it
1914 * is only freed when the VM is destroyed, until all the
1915 * execution paths have been made safe.
1916 */
5af50993 1917
1918 kfree(dev);
1919}
1920
1921/*
1922 * When the guest chooses the interrupt mode (XICS legacy or XIVE
1923 * native), the VM will switch KVM devices. The previous device will
1924 * be "released" before the new one is created.
1925 *
1926 * Until we are sure all execution paths are well protected, provide a
1927 * fail-safe (transitional) method for device destruction, in which
1928 * the XIVE device pointer is recycled and not directly freed.
1929 */
1930struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
1931{
1932 struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
1933 &kvm->arch.xive_devices.native :
1934 &kvm->arch.xive_devices.xics_on_xive;
1935 struct kvmppc_xive *xive = *kvm_xive_device;
1936
1937 if (!xive) {
1938 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
1939 *kvm_xive_device = xive;
1940 } else {
1941 memset(xive, 0, sizeof(*xive));
1942 }
1943
1944 return xive;
1945}
1946
1947/*
1948 * Create a XICS device with XIVE backend. kvm->lock is held.
1949 */
1950static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
1951{
1952 struct kvmppc_xive *xive;
1953 struct kvm *kvm = dev->kvm;
1954 int ret = 0;
1955
1956 pr_devel("Creating xive for partition\n");
1957
5422e951 1958 xive = kvmppc_xive_get_device(kvm, type);
1959 if (!xive)
1960 return -ENOMEM;
1961
1962 dev->private = xive;
1963 xive->dev = dev;
1964 xive->kvm = kvm;
7e10b9a6 1965 mutex_init(&xive->lock);
1966
1967 /* Already there? */
1968 if (kvm->arch.xive)
1969 ret = -EEXIST;
1970 else
1971 kvm->arch.xive = xive;
1972
1973 /* We use the default queue size set by the host */
1974 xive->q_order = xive_native_default_eq_shift();
1975 if (xive->q_order < PAGE_SHIFT)
1976 xive->q_page_order = 0;
1977 else
1978 xive->q_page_order = xive->q_order - PAGE_SHIFT;
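	/*
	 * Worked example (illustrative values): with 4K pages
	 * (PAGE_SHIFT = 12) and a default EQ shift of 16 (a 64KB
	 * queue), q_page_order is 16 - 12 = 4, i.e. an order-4
	 * (16 page) allocation per queue; a queue no larger than
	 * one page gives q_page_order = 0.
	 */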
1979
1980 /* Allocate a bunch of VPs */
1981 xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
1982 pr_devel("VP_Base=%x\n", xive->vp_base);
1983
1984 if (xive->vp_base == XIVE_INVALID_VP)
1985 ret = -ENOMEM;
1986
1987 xive->single_escalation = xive_native_has_single_escalation();
1988
1989 if (ret) {
1990 kfree(xive);
1991 return ret;
1992 }
1993
1994 return 0;
1995}
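/*
 * Illustrative sketch (not part of the kernel): on hosts where this
 * backend is registered for the XICS device type, a userspace VMM
 * creates it with the generic KVM_CREATE_DEVICE ioctl and then talks
 * to it through the returned device fd. vm_fd and dev_fd are
 * assumptions for the example.
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_XICS,	// XICS-on-XIVE chosen by the host
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		dev_fd = cd.fd;			// use with KVM_SET_DEVICE_ATTR etc.
 */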
1996
1997int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
1998{
1999 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2000 unsigned int i;
2001
2002 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2003 struct xive_q *q = &xc->queues[i];
2004 u32 i0, i1, idx;
2005
2006 if (!q->qpage && !xc->esc_virq[i])
2007 continue;
2008
2009 seq_printf(m, " [q%d]: ", i);
2010
2011 if (q->qpage) {
2012 idx = q->idx;
2013 i0 = be32_to_cpup(q->qpage + idx);
2014 idx = (idx + 1) & q->msk;
2015 i1 = be32_to_cpup(q->qpage + idx);
2016 seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2017 i0, i1);
2018 }
2019 if (xc->esc_virq[i]) {
2020 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2021 struct xive_irq_data *xd =
2022 irq_data_get_irq_handler_data(d);
2023 u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2024
2025 seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
2026 (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
2027 (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
2028 xc->esc_virq[i], pq, xd->eoi_page);
2029 seq_puts(m, "\n");
2030 }
2031 }
2032 return 0;
2033}
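/*
 * In the dump produced above, each "[q%d]" line shows the queue's
 * toggle bit and the next two entries at the current index, and the
 * "E:" part shows the escalation interrupt's ESB state: an upper-case
 * letter means the corresponding P or Q bit is set.
 */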
2034
2035static int xive_debug_show(struct seq_file *m, void *private)
2036{
2037 struct kvmppc_xive *xive = m->private;
2038 struct kvm *kvm = xive->kvm;
2039 struct kvm_vcpu *vcpu;
2040 u64 t_rm_h_xirr = 0;
2041 u64 t_rm_h_ipoll = 0;
2042 u64 t_rm_h_cppr = 0;
2043 u64 t_rm_h_eoi = 0;
2044 u64 t_rm_h_ipi = 0;
2045 u64 t_vm_h_xirr = 0;
2046 u64 t_vm_h_ipoll = 0;
2047 u64 t_vm_h_cppr = 0;
2048 u64 t_vm_h_eoi = 0;
2049 u64 t_vm_h_ipi = 0;
2050 unsigned int i;
2051
2052 if (!kvm)
2053 return 0;
2054
2055 seq_puts(m, "=========\nVCPU state\n=========\n");
2056
2057 kvm_for_each_vcpu(i, vcpu, kvm) {
2058 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2059
2060 if (!xc)
2061 continue;
2062
2063 seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
2064 " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2065 xc->server_num, xc->cppr, xc->hw_cppr,
2066 xc->mfrr, xc->pending,
2067 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
c424c108 2068
eacc56bb 2069 kvmppc_xive_debug_show_queues(m, vcpu);
2070
2071 t_rm_h_xirr += xc->stat_rm_h_xirr;
2072 t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2073 t_rm_h_cppr += xc->stat_rm_h_cppr;
2074 t_rm_h_eoi += xc->stat_rm_h_eoi;
2075 t_rm_h_ipi += xc->stat_rm_h_ipi;
2076 t_vm_h_xirr += xc->stat_vm_h_xirr;
2077 t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2078 t_vm_h_cppr += xc->stat_vm_h_cppr;
2079 t_vm_h_eoi += xc->stat_vm_h_eoi;
2080 t_vm_h_ipi += xc->stat_vm_h_ipi;
2081 }
2082
2083 seq_printf(m, "Hcalls totals\n");
2084 seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2085 seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2086 seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2087 seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2088 seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2089
2090 return 0;
2091}
2092
0f6ddf34 2093DEFINE_SHOW_ATTRIBUTE(xive_debug);
2094
2095static void xive_debugfs_init(struct kvmppc_xive *xive)
2096{
2097 char *name;
2098
2099 name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2100 if (!name) {
2101 pr_err("%s: no memory for name\n", __func__);
2102 return;
2103 }
2104
2105 xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2106 xive, &xive_debug_fops);
2107
2108 pr_debug("%s: created %s\n", __func__, name);
2109 kfree(name);
2110}
2111
2112static void kvmppc_xive_init(struct kvm_device *dev)
2113{
2114 struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2115
2116 /* Register some debug interfaces */
2117 xive_debugfs_init(xive);
2118}
2119
2120struct kvm_device_ops kvm_xive_ops = {
2121 .name = "kvm-xive",
2122 .create = kvmppc_xive_create,
2123 .init = kvmppc_xive_init,
5422e951 2124 .release = kvmppc_xive_release,
2125 .set_attr = xive_set_attr,
2126 .get_attr = xive_get_attr,
2127 .has_attr = xive_has_attr,
2128};
2129
2130void kvmppc_xive_init_module(void)
2131{
2132 __xive_vm_h_xirr = xive_vm_h_xirr;
2133 __xive_vm_h_ipoll = xive_vm_h_ipoll;
2134 __xive_vm_h_ipi = xive_vm_h_ipi;
2135 __xive_vm_h_cppr = xive_vm_h_cppr;
2136 __xive_vm_h_eoi = xive_vm_h_eoi;
2137}
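/*
 * The assignments above publish the virtual-mode hcall handlers for
 * this backend; the common HV XICS hcall wrappers call through these
 * pointers when they are not executing in real mode. The exit path
 * below clears the pointers again so a stale handler is never called
 * after the module is gone.
 */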
2138
2139void kvmppc_xive_exit_module(void)
2140{
2141 __xive_vm_h_xirr = NULL;
2142 __xive_vm_h_ipoll = NULL;
2143 __xive_vm_h_ipi = NULL;
2144 __xive_vm_h_cppr = NULL;
2145 __xive_vm_h_eoi = NULL;
2146}