git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git - arch/powerpc/kvm/book3s_xics.c
KVM: PPC: Book 3S: XICS: Don't lock twice when checking for resend
1 /*
2 * Copyright 2012 Michael Ellerman, IBM Corporation.
3 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/kvm_host.h>
12 #include <linux/err.h>
13 #include <linux/gfp.h>
14 #include <linux/anon_inodes.h>
15 #include <linux/spinlock.h>
16
17 #include <linux/uaccess.h>
18 #include <asm/kvm_book3s.h>
19 #include <asm/kvm_ppc.h>
20 #include <asm/hvcall.h>
21 #include <asm/xics.h>
22 #include <asm/debug.h>
23 #include <asm/time.h>
24
25 #include <linux/debugfs.h>
26 #include <linux/seq_file.h>
27
28 #include "book3s_xics.h"
29
30 #if 1
31 #define XICS_DBG(fmt...) do { } while (0)
32 #else
33 #define XICS_DBG(fmt...) trace_printk(fmt)
34 #endif
35
36 #define ENABLE_REALMODE true
37 #define DEBUG_REALMODE false
38
39 /*
40 * LOCKING
41 * =======
42 *
43 * Each ICS has a spin lock protecting the information about the IRQ
44 * sources and avoiding simultaneous deliveries of the same interrupt.
45 *
46 * ICP operations are done via a single compare & swap transaction
47 * (most ICP state fits in the union kvmppc_icp_state)
48 */
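
/*
 * Illustrative sketch (not part of the original file): the transaction
 * pattern every ICP update below follows. The state is read once, a
 * modified copy is built, and cmpxchg64() on the raw union publishes it
 * only if nothing changed in between, e.g.:
 *
 *	union kvmppc_icp_state old_state, new_state;
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		new_state.cppr = new_cppr;	(modify the copy only)
 *	} while (cmpxchg64(&icp->state.raw, old_state.raw,
 *			   new_state.raw) != old_state.raw);
 *
 * The real code routes the cmpxchg64() through icp_try_update() so the
 * interrupt output line is recomputed in the same transaction.
 */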
49
50 /*
51 * TODO
52 * ====
53 *
54 * - To speed up resends, keep a bitmap of "resend" set bits in the
55 * ICS
56 *
57 * - Speed up server# -> ICP lookup (array? hash table?)
58 *
59 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
60 * locks array to improve scalability
61 */
62
63 /* -- ICS routines -- */
64
65 static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
66 u32 new_irq, bool check_resend);
67
68 /*
69 * Return value ideally indicates how the interrupt was handled, but no
70 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
71 * so just return 0.
72 */
73 static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
74 {
75 struct ics_irq_state *state;
76 struct kvmppc_ics *ics;
77 u16 src;
78 u32 pq_old, pq_new;
79
80 XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);
81
82 ics = kvmppc_xics_find_ics(xics, irq, &src);
83 if (!ics) {
84 XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
85 return -EINVAL;
86 }
87 state = &ics->irq_state[src];
88 if (!state->exists)
89 return -EINVAL;
90
91 if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
92 level = 1;
93 else if (level == KVM_INTERRUPT_UNSET)
94 level = 0;
95 /*
96 * Treat any other value the same as 1, consistent with the original
97 * code. Maybe WARN here?
98 */
99
100 if (!state->lsi && level == 0) /* noop for MSI */
101 return 0;
102
103 do {
104 pq_old = state->pq_state;
105 if (state->lsi) {
106 if (level) {
107 if (pq_old & PQ_PRESENTED)
108 /* Setting already set LSI ... */
109 return 0;
110
111 pq_new = PQ_PRESENTED;
112 } else
113 pq_new = 0;
114 } else
115 pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
116 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
117
118 /* Test P=1, Q=0: this is the only case where we present */
119 if (pq_new == PQ_PRESENTED)
120 icp_deliver_irq(xics, NULL, irq, false);
121
122 /* Record which CPU this arrived on for passed-through interrupts */
123 if (state->host_irq)
124 state->intr_cpu = raw_smp_processor_id();
125
126 return 0;
127 }
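
/*
 * Worked example of the MSI path above, with P the PQ_PRESENTED bit and
 * Q the PQ_QUEUED bit from book3s_xics.h. The update
 * pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED shifts the old P into Q
 * and sets P, giving:
 *
 *	P=0 Q=0  ->  P=1 Q=0	new interrupt, presented to the ICP
 *	P=1 Q=0  ->  P=1 Q=1	retrigger before EOI, coalesced into Q
 *	P=1 Q=1  ->  P=1 Q=1	further retriggers stay coalesced
 *
 * Of these, only the first yields pq_new == PQ_PRESENTED, the sole case
 * in which icp_deliver_irq() is called.
 */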
128
129 static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
130 struct kvmppc_icp *icp)
131 {
132 int i;
133
134 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
135 struct ics_irq_state *state = &ics->irq_state[i];
136 if (state->resend) {
137 XICS_DBG("resend %#x prio %#x\n", state->number,
138 state->priority);
139 icp_deliver_irq(xics, icp, state->number, true);
140 }
141 }
142 }
143
144 static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
145 struct ics_irq_state *state,
146 u32 server, u32 priority, u32 saved_priority)
147 {
148 bool deliver;
149 unsigned long flags;
150
151 local_irq_save(flags);
152 arch_spin_lock(&ics->lock);
153
154 state->server = server;
155 state->priority = priority;
156 state->saved_priority = saved_priority;
157 deliver = false;
158 if ((state->masked_pending || state->resend) && priority != MASKED) {
159 state->masked_pending = 0;
160 state->resend = 0;
161 deliver = true;
162 }
163
164 arch_spin_unlock(&ics->lock);
165 local_irq_restore(flags);
166
167 return deliver;
168 }
169
170 int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
171 {
172 struct kvmppc_xics *xics = kvm->arch.xics;
173 struct kvmppc_icp *icp;
174 struct kvmppc_ics *ics;
175 struct ics_irq_state *state;
176 u16 src;
177
178 if (!xics)
179 return -ENODEV;
180
181 ics = kvmppc_xics_find_ics(xics, irq, &src);
182 if (!ics)
183 return -EINVAL;
184 state = &ics->irq_state[src];
185
186 icp = kvmppc_xics_find_server(kvm, server);
187 if (!icp)
188 return -EINVAL;
189
190 XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
191 irq, server, priority,
192 state->masked_pending, state->resend);
193
194 if (write_xive(xics, ics, state, server, priority, priority))
195 icp_deliver_irq(xics, icp, irq, false);
196
197 return 0;
198 }
199
200 int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
201 {
202 struct kvmppc_xics *xics = kvm->arch.xics;
203 struct kvmppc_ics *ics;
204 struct ics_irq_state *state;
205 u16 src;
206 unsigned long flags;
207
208 if (!xics)
209 return -ENODEV;
210
211 ics = kvmppc_xics_find_ics(xics, irq, &src);
212 if (!ics)
213 return -EINVAL;
214 state = &ics->irq_state[src];
215
216 local_irq_save(flags);
217 arch_spin_lock(&ics->lock);
218 *server = state->server;
219 *priority = state->priority;
220 arch_spin_unlock(&ics->lock);
221 local_irq_restore(flags);
222
223 return 0;
224 }
225
226 int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
227 {
228 struct kvmppc_xics *xics = kvm->arch.xics;
229 struct kvmppc_icp *icp;
230 struct kvmppc_ics *ics;
231 struct ics_irq_state *state;
232 u16 src;
233
234 if (!xics)
235 return -ENODEV;
236
237 ics = kvmppc_xics_find_ics(xics, irq, &src);
238 if (!ics)
239 return -EINVAL;
240 state = &ics->irq_state[src];
241
242 icp = kvmppc_xics_find_server(kvm, state->server);
243 if (!icp)
244 return -EINVAL;
245
246 if (write_xive(xics, ics, state, state->server, state->saved_priority,
247 state->saved_priority))
248 icp_deliver_irq(xics, icp, irq, false);
249
250 return 0;
251 }
252
253 int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
254 {
255 struct kvmppc_xics *xics = kvm->arch.xics;
256 struct kvmppc_ics *ics;
257 struct ics_irq_state *state;
258 u16 src;
259
260 if (!xics)
261 return -ENODEV;
262
263 ics = kvmppc_xics_find_ics(xics, irq, &src);
264 if (!ics)
265 return -EINVAL;
266 state = &ics->irq_state[src];
267
268 write_xive(xics, ics, state, state->server, MASKED, state->priority);
269
270 return 0;
271 }
272
273 /* -- ICP routines, including hcalls -- */
274
275 static inline bool icp_try_update(struct kvmppc_icp *icp,
276 union kvmppc_icp_state old,
277 union kvmppc_icp_state new,
278 bool change_self)
279 {
280 bool success;
281
282 /* Calculate new output value */
283 new.out_ee = (new.xisr && (new.pending_pri < new.cppr));
284
285 /* Attempt atomic update */
286 success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
287 if (!success)
288 goto bail;
289
290 XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
291 icp->server_num,
292 old.cppr, old.mfrr, old.pending_pri, old.xisr,
293 old.need_resend, old.out_ee);
294 XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
295 new.cppr, new.mfrr, new.pending_pri, new.xisr,
296 new.need_resend, new.out_ee);
297 /*
298 * Check for output state update
299 *
300 * Note that this is racy since another processor could be updating
301 * the state already. This is why we never clear the interrupt output
302 * here, we only ever set it. The clear only happens prior to doing
303 * an update and only by the processor itself. Currently we do it
304 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
305 *
306 * We also do not try to figure out whether the EE state has changed,
307 * we unconditionally set it if the new state calls for it. The reason
308 * for that is that we opportunistically remove the pending interrupt
309 * flag when raising CPPR, so we need to set it back here if an
310 * interrupt is still pending.
311 */
312 if (new.out_ee) {
313 kvmppc_book3s_queue_irqprio(icp->vcpu,
314 BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
315 if (!change_self)
316 kvmppc_fast_vcpu_kick(icp->vcpu);
317 }
318 bail:
319 return success;
320 }
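
/*
 * Worked example of the out_ee calculation above: with new.xisr = 0x1001,
 * new.pending_pri = 0x05 and new.cppr = 0xff (least favored), out_ee
 * becomes 1 and an external interrupt is queued to the vcpu; a
 * transaction that loaded cppr with 0x05 or a smaller (more favored)
 * value would compute out_ee = 0 instead.
 */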
321
322 static void icp_check_resend(struct kvmppc_xics *xics,
323 struct kvmppc_icp *icp)
324 {
325 u32 icsid;
326
327 /* Order this load with the test for need_resend in the caller */
328 smp_rmb();
329 for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
330 struct kvmppc_ics *ics = xics->ics[icsid];
331
332 if (!test_and_clear_bit(icsid, icp->resend_map))
333 continue;
334 if (!ics)
335 continue;
336 ics_check_resend(xics, ics, icp);
337 }
338 }
339
340 static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
341 u32 *reject)
342 {
343 union kvmppc_icp_state old_state, new_state;
344 bool success;
345
346 XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
347 icp->server_num);
348
349 do {
350 old_state = new_state = READ_ONCE(icp->state);
351
352 *reject = 0;
353
354 /* See if we can deliver */
355 success = new_state.cppr > priority &&
356 new_state.mfrr > priority &&
357 new_state.pending_pri > priority;
358
359 /*
360 * If we can, check for a rejection and perform the
361 * delivery
362 */
363 if (success) {
364 *reject = new_state.xisr;
365 new_state.xisr = irq;
366 new_state.pending_pri = priority;
367 } else {
368 /*
369 * If we failed to deliver we set need_resend
370 * so a subsequent CPPR state change causes us
371 * to try a new delivery.
372 */
373 new_state.need_resend = true;
374 }
375
376 } while (!icp_try_update(icp, old_state, new_state, false));
377
378 return success;
379 }
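
/*
 * Example of the test above: priorities are "smaller is more favored",
 * so an interrupt at priority 0x05 is delivered only if 0x05 is
 * numerically below cppr, mfrr and pending_pri all at once. With all
 * three at 0xff it succeeds; with an IPI requested at mfrr = 0x04 it
 * fails and need_resend is latched in the same atomic update.
 */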
380
381 static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
382 u32 new_irq, bool check_resend)
383 {
384 struct ics_irq_state *state;
385 struct kvmppc_ics *ics;
386 u32 reject;
387 u16 src;
388 unsigned long flags;
389
390 /*
391 * This is used both for initial delivery of an interrupt and
392 * for subsequent rejection.
393 *
394 * Rejection can be racy vs. resends. We have evaluated the
395 * rejection in an atomic ICP transaction which is now complete,
396 * so potentially the ICP can already accept the interrupt again.
397 *
398 * So we need to retry the delivery. Essentially the reject path
399 * boils down to a failed delivery. Always.
400 *
401 * Now the interrupt could also have moved to a different target,
402 * thus we may need to re-do the ICP lookup as well
403 */
404
405 again:
406 /* Get the ICS state and lock it */
407 ics = kvmppc_xics_find_ics(xics, new_irq, &src);
408 if (!ics) {
409 XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
410 return;
411 }
412 state = &ics->irq_state[src];
413
414 /* Get a lock on the ICS */
415 local_irq_save(flags);
416 arch_spin_lock(&ics->lock);
417
418 /* Get our server */
419 if (!icp || state->server != icp->server_num) {
420 icp = kvmppc_xics_find_server(xics->kvm, state->server);
421 if (!icp) {
422 pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
423 new_irq, state->server);
424 goto out;
425 }
426 }
427
428 if (check_resend && !state->resend)
429 goto out;
431
432 /* Clear the resend bit of that interrupt */
433 state->resend = 0;
434
435 /*
436 * If masked, bail out
437 *
438 * Note: PAPR doesn't mention anything about masked pending
439 * when doing a resend, only when doing a delivery.
440 *
441 * However that would have the effect of losing a masked
442 * interrupt that was rejected and isn't consistent with
443 * the whole masked_pending business which is about not
444 * losing interrupts that occur while masked.
445 *
446 * I don't differentiate normal deliveries and resends; this
447 * implementation will differ from PAPR and not lose such
448 * interrupts.
449 */
450 if (state->priority == MASKED) {
451 XICS_DBG("irq %#x masked pending\n", new_irq);
452 state->masked_pending = 1;
453 goto out;
454 }
455
456 /*
457 * Try the delivery, this will set the need_resend flag
458 * in the ICP as part of the atomic transaction if the
459 * delivery is not possible.
460 *
461 * Note that if successful, the new delivery might have itself
462 * rejected an interrupt that was "delivered" before we took the
463 * ics spin lock.
464 *
465 * In this case we do the whole sequence all over again for the
466 * new guy. We cannot assume that the rejected interrupt is less
467 * favored than the new one, and thus doesn't need to be delivered,
468 * because by the time we exit icp_try_to_deliver() the target
469 * processor may well have already consumed & completed it, and thus
470 * the rejected interrupt might actually be already acceptable.
471 */
472 if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
473 /*
474 * Delivery was successful, did we reject somebody else ?
475 */
476 if (reject && reject != XICS_IPI) {
477 arch_spin_unlock(&ics->lock);
478 local_irq_restore(flags);
479 new_irq = reject;
480 check_resend = 0;
481 goto again;
482 }
483 } else {
484 /*
485 * We failed to deliver the interrupt, so we need to set the
486 * resend map bit and mark the ICS state as needing a resend.
487 */
488 state->resend = 1;
489
490 /*
491 * Make sure when checking resend, we don't miss the resend
492 * if resend_map bit is seen and cleared.
493 */
494 smp_wmb();
495 set_bit(ics->icsid, icp->resend_map);
496
497 /*
498 * If the need_resend flag got cleared in the ICP some time
499 * between icp_try_to_deliver() atomic update and now, then
500 * we know it might have missed the resend_map bit. So we
501 * retry
502 */
503 smp_mb();
504 if (!icp->state.need_resend) {
505 state->resend = 0;
506 arch_spin_unlock(&ics->lock);
507 local_irq_restore(flags);
508 check_resend = 0;
509 goto again;
510 }
511 }
512 out:
513 arch_spin_unlock(&ics->lock);
514 local_irq_restore(flags);
515 }
516
517 static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
518 u8 new_cppr)
519 {
520 union kvmppc_icp_state old_state, new_state;
521 bool resend;
522
523 /*
524 * This handles several related states in one operation:
525 *
526 * ICP State: Down_CPPR
527 *
528 * Load CPPR with new value and if the XISR is 0
529 * then check for resends:
530 *
531 * ICP State: Resend
532 *
533 * If MFRR is more favored than CPPR, check for IPIs
534 * and notify ICS of a potential resend. This is done
535 * asynchronously (when used in real mode, we will have
536 * to exit here).
537 *
538 * We do not handle the complete Check_IPI as documented
539 * here. In the PAPR, this state will be used for both
540 * Set_MFRR and Down_CPPR. However, we know that we aren't
541 * changing the MFRR state here so we don't need to handle
542 * the case of an MFRR causing a reject of a pending irq;
543 * this will have been handled when the MFRR was set in the
544 * first place.
545 *
546 * Thus we don't have to handle rejects, only resends.
547 *
548 * When implementing real mode for HV KVM, resend will lead to
549 * a H_TOO_HARD return and the whole transaction will be handled
550 * in virtual mode.
551 */
552 do {
553 old_state = new_state = READ_ONCE(icp->state);
554
555 /* Down_CPPR */
556 new_state.cppr = new_cppr;
557
558 /*
559 * Cut down Resend / Check_IPI / IPI
560 *
561 * The logic is that we cannot have a pending interrupt
562 * trumped by an IPI at this point (see above), so we
563 * know that either the pending interrupt is already an
564 * IPI (in which case we don't care to override it) or
565 * it's either more favored than us or non-existent.
566 */
567 if (new_state.mfrr < new_cppr &&
568 new_state.mfrr <= new_state.pending_pri) {
569 WARN_ON(new_state.xisr != XICS_IPI &&
570 new_state.xisr != 0);
571 new_state.pending_pri = new_state.mfrr;
572 new_state.xisr = XICS_IPI;
573 }
574
575 /* Latch/clear resend bit */
576 resend = new_state.need_resend;
577 new_state.need_resend = 0;
578
579 } while (!icp_try_update(icp, old_state, new_state, true));
580
581 /*
582 * Now handle resend checks. Those are asynchronous to the ICP
583 * state update in HW (ie bus transactions) so we can handle them
584 * separately here too
585 */
586 if (resend)
587 icp_check_resend(xics, icp);
588 }
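
/*
 * Worked example of the transaction above: a vcpu with an IPI requested
 * at mfrr = 0x04, no pending source interrupt (xisr = 0, pending_pri =
 * 0xff) and CPPR lowered from 0x00 to 0xff passes both tests
 * (0x04 < 0xff and 0x04 <= 0xff), so the same atomic update loads
 * xisr = XICS_IPI and pending_pri = 0x04 and the IPI becomes
 * deliverable with no separate Check_IPI pass.
 */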
589
590 static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
591 {
592 union kvmppc_icp_state old_state, new_state;
593 struct kvmppc_icp *icp = vcpu->arch.icp;
594 u32 xirr;
595
596 /* First, remove EE from the processor */
597 kvmppc_book3s_dequeue_irqprio(icp->vcpu,
598 BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
599
600 /*
601 * ICP State: Accept_Interrupt
602 *
603 * Return the pending interrupt (if any) along with the
604 * current CPPR, then clear the XISR & set CPPR to the
605 * pending priority
606 */
607 do {
608 old_state = new_state = READ_ONCE(icp->state);
609
610 xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
611 if (!old_state.xisr)
612 break;
613 new_state.cppr = new_state.pending_pri;
614 new_state.pending_pri = 0xff;
615 new_state.xisr = 0;
616
617 } while (!icp_try_update(icp, old_state, new_state, true));
618
619 XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);
620
621 return xirr;
622 }
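
/*
 * Example of the XIRR value built above: with cppr = 0xff and
 * xisr = 0x001001, H_XIRR returns 0xff001001, i.e. the pre-accept CPPR
 * in the top byte and the 24-bit source number below it, while the
 * transaction loads CPPR with the pending priority and clears XISR.
 */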
623
624 static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
625 unsigned long mfrr)
626 {
627 union kvmppc_icp_state old_state, new_state;
628 struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
629 struct kvmppc_icp *icp;
630 u32 reject;
631 bool resend;
632 bool local;
633
634 XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
635 vcpu->vcpu_id, server, mfrr);
636
637 icp = vcpu->arch.icp;
638 local = icp->server_num == server;
639 if (!local) {
640 icp = kvmppc_xics_find_server(vcpu->kvm, server);
641 if (!icp)
642 return H_PARAMETER;
643 }
644
645 /*
646 * ICP state: Set_MFRR
647 *
648 * If the CPPR is more favored than the new MFRR, then
649 * nothing needs to be rejected as there can be no XISR to
650 * reject. If the MFRR is being made less favored then
651 * there might be a previously-rejected interrupt needing
652 * to be resent.
653 *
654 * ICP state: Check_IPI
655 *
656 * If the CPPR is less favored, then we might be replacing
657 * an interrupt, and thus need to possibly reject it.
658 *
659 * ICP State: IPI
660 *
661 * Besides rejecting any pending interrupts, we also
662 * update XISR and pending_pri to mark IPI as pending.
663 *
664 * PAPR does not describe this state, but if the MFRR is being
665 * made less favored than its earlier value, there might be
666 * a previously-rejected interrupt needing to be resent.
667 * Ideally, we would want to resend only if
668 * prio(pending_interrupt) < mfrr &&
669 * prio(pending_interrupt) < cppr
670 * where pending interrupt is the one that was rejected. But
671 * we don't have that state, so we simply trigger a resend
672 * whenever the MFRR is made less favored.
673 */
674 do {
675 old_state = new_state = READ_ONCE(icp->state);
676
677 /* Set_MFRR */
678 new_state.mfrr = mfrr;
679
680 /* Check_IPI */
681 reject = 0;
682 resend = false;
683 if (mfrr < new_state.cppr) {
684 /* Reject a pending interrupt if not an IPI */
685 if (mfrr <= new_state.pending_pri) {
686 reject = new_state.xisr;
687 new_state.pending_pri = mfrr;
688 new_state.xisr = XICS_IPI;
689 }
690 }
691
692 if (mfrr > old_state.mfrr) {
693 resend = new_state.need_resend;
694 new_state.need_resend = 0;
695 }
696 } while (!icp_try_update(icp, old_state, new_state, local));
697
698 /* Handle reject */
699 if (reject && reject != XICS_IPI)
700 icp_deliver_irq(xics, icp, reject, false);
701
702 /* Handle resend */
703 if (resend)
704 icp_check_resend(xics, icp);
705
706 return H_SUCCESS;
707 }
708
709 static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
710 {
711 union kvmppc_icp_state state;
712 struct kvmppc_icp *icp;
713
714 icp = vcpu->arch.icp;
715 if (icp->server_num != server) {
716 icp = kvmppc_xics_find_server(vcpu->kvm, server);
717 if (!icp)
718 return H_PARAMETER;
719 }
720 state = READ_ONCE(icp->state);
721 kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
722 kvmppc_set_gpr(vcpu, 5, state.mfrr);
723 return H_SUCCESS;
724 }
725
726 static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
727 {
728 union kvmppc_icp_state old_state, new_state;
729 struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
730 struct kvmppc_icp *icp = vcpu->arch.icp;
731 u32 reject;
732
733 XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);
734
735 /*
736 * ICP State: Set_CPPR
737 *
738 * We can safely compare the new value with the current
739 * value outside of the transaction as the CPPR is only
740 * ever changed by the processor on itself
741 */
742 if (cppr > icp->state.cppr)
743 icp_down_cppr(xics, icp, cppr);
744 else if (cppr == icp->state.cppr)
745 return;
746
747 /*
748 * ICP State: Up_CPPR
749 *
750 * The processor is raising its priority, this can result
751 * in a rejection of a pending interrupt:
752 *
753 * ICP State: Reject_Current
754 *
755 * We can remove EE from the current processor, the update
756 * transaction will set it again if needed
757 */
758 kvmppc_book3s_dequeue_irqprio(icp->vcpu,
759 BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
760
761 do {
762 old_state = new_state = READ_ONCE(icp->state);
763
764 reject = 0;
765 new_state.cppr = cppr;
766
767 if (cppr <= new_state.pending_pri) {
768 reject = new_state.xisr;
769 new_state.xisr = 0;
770 new_state.pending_pri = 0xff;
771 }
772
773 } while (!icp_try_update(icp, old_state, new_state, true));
774
775 /*
776 * Check for rejects. They are handled by doing a new delivery
777 * attempt (see comments in icp_deliver_irq).
778 */
779 if (reject && reject != XICS_IPI)
780 icp_deliver_irq(xics, icp, reject, false);
781 }
782
783 static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
784 {
785 struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
786 struct kvmppc_icp *icp = vcpu->arch.icp;
787 struct kvmppc_ics *ics;
788 struct ics_irq_state *state;
789 u16 src;
790 u32 pq_old, pq_new;
791
792 /*
793 * ICS EOI handling: For LSI, if P bit is still set, we need to
794 * resend it.
795 *
796 * For MSI, we move Q bit into P (and clear Q). If it is set,
797 * resend it.
798 */
799
800 ics = kvmppc_xics_find_ics(xics, irq, &src);
801 if (!ics) {
802 XICS_DBG("ios_eoi: IRQ 0x%06x not found !\n", irq);
803 return H_PARAMETER;
804 }
805 state = &ics->irq_state[src];
806
807 if (state->lsi)
808 pq_new = state->pq_state;
809 else
810 do {
811 pq_old = state->pq_state;
812 pq_new = pq_old >> 1;
813 } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
814
815 if (pq_new & PQ_PRESENTED)
816 icp_deliver_irq(xics, icp, irq, false);
817
818 kvm_notify_acked_irq(vcpu->kvm, 0, irq);
819
820 return H_SUCCESS;
821 }
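
/*
 * Example of the EOI step above for an MSI: pq_state P=1,Q=1 shifts to
 * P=1,Q=0, so a coalesced interrupt is re-presented via
 * icp_deliver_irq(); P=1,Q=0 shifts to P=0,Q=0 and the source goes
 * idle. For an LSI the state is left as-is, so a line that is still
 * asserted (P set) is re-presented.
 */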
822
823 static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
824 {
825 struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
826 struct kvmppc_icp *icp = vcpu->arch.icp;
827 u32 irq = xirr & 0x00ffffff;
828
829 XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);
830
831 /*
832 * ICP State: EOI
833 *
834 * Note: If EOI is incorrectly used by SW to lower the CPPR
835 * value (ie more favored), we do not check for rejection of
836 * a pending interrupt; this is a SW error and PAPR specifies
837 * that we don't have to deal with it.
838 *
839 * The sending of an EOI to the ICS is handled after the
840 * CPPR update
841 *
842 * ICP State: Down_CPPR which we handle
843 * in a separate function as it's shared with H_CPPR.
844 */
845 icp_down_cppr(xics, icp, xirr >> 24);
846
847 /* IPIs have no EOI */
848 if (irq == XICS_IPI)
849 return H_SUCCESS;
850
851 return ics_eoi(vcpu, irq);
852 }
853
854 int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
855 {
856 struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
857 struct kvmppc_icp *icp = vcpu->arch.icp;
858
859 XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
860 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
861
862 if (icp->rm_action & XICS_RM_KICK_VCPU) {
863 icp->n_rm_kick_vcpu++;
864 kvmppc_fast_vcpu_kick(icp->rm_kick_target);
865 }
866 if (icp->rm_action & XICS_RM_CHECK_RESEND) {
867 icp->n_rm_check_resend++;
868 icp_check_resend(xics, icp->rm_resend_icp);
869 }
870 if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
871 icp->n_rm_notify_eoi++;
872 kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
873 }
874
875 icp->rm_action = 0;
876
877 return H_SUCCESS;
878 }
879 EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
880
881 int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
882 {
883 struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
884 unsigned long res;
885 int rc = H_SUCCESS;
886
887 /* Check if we have an ICP */
888 if (!xics || !vcpu->arch.icp)
889 return H_HARDWARE;
890
891 /* These requests don't have real-mode implementations at present */
892 switch (req) {
893 case H_XIRR_X:
894 res = kvmppc_h_xirr(vcpu);
895 kvmppc_set_gpr(vcpu, 4, res);
896 kvmppc_set_gpr(vcpu, 5, get_tb());
897 return rc;
898 case H_IPOLL:
899 rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
900 return rc;
901 }
902
903 /* Check for real mode returning too hard */
904 if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
905 return kvmppc_xics_rm_complete(vcpu, req);
906
907 switch (req) {
908 case H_XIRR:
909 res = kvmppc_h_xirr(vcpu);
910 kvmppc_set_gpr(vcpu, 4, res);
911 break;
912 case H_CPPR:
913 kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
914 break;
915 case H_EOI:
916 rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
917 break;
918 case H_IPI:
919 rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
920 kvmppc_get_gpr(vcpu, 5));
921 break;
922 }
923
924 return rc;
925 }
926 EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
927
928
929 /* -- Initialisation code etc. -- */
930
931 static void xics_debugfs_irqmap(struct seq_file *m,
932 struct kvmppc_passthru_irqmap *pimap)
933 {
934 int i;
935
936 if (!pimap)
937 return;
938 seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
939 pimap->n_mapped);
940 for (i = 0; i < pimap->n_mapped; i++) {
941 seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
942 pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
943 }
944 }
945
946 static int xics_debug_show(struct seq_file *m, void *private)
947 {
948 struct kvmppc_xics *xics = m->private;
949 struct kvm *kvm = xics->kvm;
950 struct kvm_vcpu *vcpu;
951 int icsid, i;
952 unsigned long flags;
953 unsigned long t_rm_kick_vcpu, t_rm_check_resend;
954 unsigned long t_rm_notify_eoi;
955 unsigned long t_reject, t_check_resend;
956
957 if (!kvm)
958 return 0;
959
960 t_rm_kick_vcpu = 0;
961 t_rm_notify_eoi = 0;
962 t_rm_check_resend = 0;
963 t_check_resend = 0;
964 t_reject = 0;
965
966 xics_debugfs_irqmap(m, kvm->arch.pimap);
967
968 seq_printf(m, "=========\nICP state\n=========\n");
969
970 kvm_for_each_vcpu(i, vcpu, kvm) {
971 struct kvmppc_icp *icp = vcpu->arch.icp;
972 union kvmppc_icp_state state;
973
974 if (!icp)
975 continue;
976
977 state.raw = READ_ONCE(icp->state.raw);
978 seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
979 icp->server_num, state.xisr,
980 state.pending_pri, state.cppr, state.mfrr,
981 state.out_ee, state.need_resend);
982 t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
983 t_rm_notify_eoi += icp->n_rm_notify_eoi;
984 t_rm_check_resend += icp->n_rm_check_resend;
985 t_check_resend += icp->n_check_resend;
986 t_reject += icp->n_reject;
987 }
988
989 seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
990 t_rm_kick_vcpu, t_rm_check_resend,
991 t_rm_notify_eoi);
992 seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
993 t_check_resend, t_reject);
994 for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
995 struct kvmppc_ics *ics = xics->ics[icsid];
996
997 if (!ics)
998 continue;
999
1000 seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
1001 icsid);
1002
1003 local_irq_save(flags);
1004 arch_spin_lock(&ics->lock);
1005
1006 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1007 struct ics_irq_state *irq = &ics->irq_state[i];
1008
1009 seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
1010 irq->number, irq->server, irq->priority,
1011 irq->saved_priority, irq->pq_state,
1012 irq->resend, irq->masked_pending);
1013
1014 }
1015 arch_spin_unlock(&ics->lock);
1016 local_irq_restore(flags);
1017 }
1018 return 0;
1019 }
1020
1021 static int xics_debug_open(struct inode *inode, struct file *file)
1022 {
1023 return single_open(file, xics_debug_show, inode->i_private);
1024 }
1025
1026 static const struct file_operations xics_debug_fops = {
1027 .open = xics_debug_open,
1028 .read = seq_read,
1029 .llseek = seq_lseek,
1030 .release = single_release,
1031 };
1032
1033 static void xics_debugfs_init(struct kvmppc_xics *xics)
1034 {
1035 char *name;
1036
1037 name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
1038 if (!name) {
1039 pr_err("%s: no memory for name\n", __func__);
1040 return;
1041 }
1042
1043 xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
1044 xics, &xics_debug_fops);
1045
1046 pr_debug("%s: created %s\n", __func__, name);
1047 kfree(name);
1048 }
1049
1050 static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
1051 struct kvmppc_xics *xics, int irq)
1052 {
1053 struct kvmppc_ics *ics;
1054 int i, icsid;
1055
1056 icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
1057
1058 mutex_lock(&kvm->lock);
1059
1060 /* ICS already exists - somebody else got here first */
1061 if (xics->ics[icsid])
1062 goto out;
1063
1064 /* Create the ICS */
1065 ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
1066 if (!ics)
1067 goto out;
1068
1069 ics->icsid = icsid;
1070
1071 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1072 ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
1073 ics->irq_state[i].priority = MASKED;
1074 ics->irq_state[i].saved_priority = MASKED;
1075 }
1076 smp_wmb();
1077 xics->ics[icsid] = ics;
1078
1079 if (icsid > xics->max_icsid)
1080 xics->max_icsid = icsid;
1081
1082 out:
1083 mutex_unlock(&kvm->lock);
1084 return xics->ics[icsid];
1085 }
1086
1087 int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
1088 {
1089 struct kvmppc_icp *icp;
1090
1091 if (!vcpu->kvm->arch.xics)
1092 return -ENODEV;
1093
1094 if (kvmppc_xics_find_server(vcpu->kvm, server_num))
1095 return -EEXIST;
1096
1097 icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
1098 if (!icp)
1099 return -ENOMEM;
1100
1101 icp->vcpu = vcpu;
1102 icp->server_num = server_num;
1103 icp->state.mfrr = MASKED;
1104 icp->state.pending_pri = MASKED;
1105 vcpu->arch.icp = icp;
1106
1107 XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
1108
1109 return 0;
1110 }
1111
1112 u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
1113 {
1114 struct kvmppc_icp *icp = vcpu->arch.icp;
1115 union kvmppc_icp_state state;
1116
1117 if (!icp)
1118 return 0;
1119 state = icp->state;
1120 return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
1121 ((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
1122 ((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
1123 ((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
1124 }
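
/*
 * Sketch of the one-reg layout built above, assuming the shift values in
 * the uapi header (CPPR at bit 56, XISR at bit 32, MFRR at bit 24, PPRI
 * at bit 16):
 *
 *	63     56|55        32|31    24|23    16|15           0
 *	  CPPR   |    XISR    |  MFRR  |  PPRI  |   (unused)
 *
 * kvmppc_xics_set_icp() below parses the same layout back in.
 */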
1125
1126 int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
1127 {
1128 struct kvmppc_icp *icp = vcpu->arch.icp;
1129 struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
1130 union kvmppc_icp_state old_state, new_state;
1131 struct kvmppc_ics *ics;
1132 u8 cppr, mfrr, pending_pri;
1133 u32 xisr;
1134 u16 src;
1135 bool resend;
1136
1137 if (!icp || !xics)
1138 return -ENOENT;
1139
1140 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
1141 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
1142 KVM_REG_PPC_ICP_XISR_MASK;
1143 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
1144 pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;
1145
1146 /* Require the new state to be internally consistent */
1147 if (xisr == 0) {
1148 if (pending_pri != 0xff)
1149 return -EINVAL;
1150 } else if (xisr == XICS_IPI) {
1151 if (pending_pri != mfrr || pending_pri >= cppr)
1152 return -EINVAL;
1153 } else {
1154 if (pending_pri >= mfrr || pending_pri >= cppr)
1155 return -EINVAL;
1156 ics = kvmppc_xics_find_ics(xics, xisr, &src);
1157 if (!ics)
1158 return -EINVAL;
1159 }
1160
1161 new_state.raw = 0;
1162 new_state.cppr = cppr;
1163 new_state.xisr = xisr;
1164 new_state.mfrr = mfrr;
1165 new_state.pending_pri = pending_pri;
1166
1167 /*
1168 * Deassert the CPU interrupt request.
1169 * icp_try_update will reassert it if necessary.
1170 */
1171 kvmppc_book3s_dequeue_irqprio(icp->vcpu,
1172 BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
1173
1174 /*
1175 * Note that if we displace an interrupt from old_state.xisr,
1176 * we don't mark it as rejected. We expect userspace to set
1177 * the state of the interrupt sources to be consistent with
1178 * the ICP states (either before or afterwards, which doesn't
1179 * matter). We do handle resends due to CPPR becoming less
1180 * favoured because that is necessary to end up with a
1181 * consistent state in the situation where userspace restores
1182 * the ICS states before the ICP states.
1183 */
1184 do {
1185 old_state = READ_ONCE(icp->state);
1186
1187 if (new_state.mfrr <= old_state.mfrr) {
1188 resend = false;
1189 new_state.need_resend = old_state.need_resend;
1190 } else {
1191 resend = old_state.need_resend;
1192 new_state.need_resend = 0;
1193 }
1194 } while (!icp_try_update(icp, old_state, new_state, false));
1195
1196 if (resend)
1197 icp_check_resend(xics, icp);
1198
1199 return 0;
1200 }
1201
1202 static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
1203 {
1204 int ret;
1205 struct kvmppc_ics *ics;
1206 struct ics_irq_state *irqp;
1207 u64 __user *ubufp = (u64 __user *) addr;
1208 u16 idx;
1209 u64 val, prio;
1210 unsigned long flags;
1211
1212 ics = kvmppc_xics_find_ics(xics, irq, &idx);
1213 if (!ics)
1214 return -ENOENT;
1215
1216 irqp = &ics->irq_state[idx];
1217 local_irq_save(flags);
1218 arch_spin_lock(&ics->lock);
1219 ret = -ENOENT;
1220 if (irqp->exists) {
1221 val = irqp->server;
1222 prio = irqp->priority;
1223 if (prio == MASKED) {
1224 val |= KVM_XICS_MASKED;
1225 prio = irqp->saved_priority;
1226 }
1227 val |= prio << KVM_XICS_PRIORITY_SHIFT;
1228 if (irqp->lsi) {
1229 val |= KVM_XICS_LEVEL_SENSITIVE;
1230 if (irqp->pq_state & PQ_PRESENTED)
1231 val |= KVM_XICS_PENDING;
1232 } else if (irqp->masked_pending || irqp->resend)
1233 val |= KVM_XICS_PENDING;
1234
1235 if (irqp->pq_state & PQ_PRESENTED)
1236 val |= KVM_XICS_PRESENTED;
1237
1238 if (irqp->pq_state & PQ_QUEUED)
1239 val |= KVM_XICS_QUEUED;
1240
1241 ret = 0;
1242 }
1243 arch_spin_unlock(&ics->lock);
1244 local_irq_restore(flags);
1245
1246 if (!ret && put_user(val, ubufp))
1247 ret = -EFAULT;
1248
1249 return ret;
1250 }
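
/*
 * Sketch of the u64 handed to userspace above, assuming the KVM_XICS_*
 * encoding from the uapi header: the server number in the low bits, the
 * 8-bit priority at KVM_XICS_PRIORITY_SHIFT, and LEVEL_SENSITIVE,
 * MASKED, PENDING, PRESENTED and QUEUED as individual flag bits above
 * that. xics_set_source() below accepts the same encoding; the pair is
 * what lets userspace save and restore source state across migration.
 */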
1251
1252 static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
1253 {
1254 struct kvmppc_ics *ics;
1255 struct ics_irq_state *irqp;
1256 u64 __user *ubufp = (u64 __user *) addr;
1257 u16 idx;
1258 u64 val;
1259 u8 prio;
1260 u32 server;
1261 unsigned long flags;
1262
1263 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1264 return -ENOENT;
1265
1266 ics = kvmppc_xics_find_ics(xics, irq, &idx);
1267 if (!ics) {
1268 ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
1269 if (!ics)
1270 return -ENOMEM;
1271 }
1272 irqp = &ics->irq_state[idx];
1273 if (get_user(val, ubufp))
1274 return -EFAULT;
1275
1276 server = val & KVM_XICS_DESTINATION_MASK;
1277 prio = val >> KVM_XICS_PRIORITY_SHIFT;
1278 if (prio != MASKED &&
1279 kvmppc_xics_find_server(xics->kvm, server) == NULL)
1280 return -EINVAL;
1281
1282 local_irq_save(flags);
1283 arch_spin_lock(&ics->lock);
1284 irqp->server = server;
1285 irqp->saved_priority = prio;
1286 if (val & KVM_XICS_MASKED)
1287 prio = MASKED;
1288 irqp->priority = prio;
1289 irqp->resend = 0;
1290 irqp->masked_pending = 0;
1291 irqp->lsi = 0;
1292 irqp->pq_state = 0;
1293 if (val & KVM_XICS_LEVEL_SENSITIVE)
1294 irqp->lsi = 1;
1295 /* If PENDING, set P in case P was not saved by older code */
1296 if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
1297 irqp->pq_state |= PQ_PRESENTED;
1298 if (val & KVM_XICS_QUEUED)
1299 irqp->pq_state |= PQ_QUEUED;
1300 irqp->exists = 1;
1301 arch_spin_unlock(&ics->lock);
1302 local_irq_restore(flags);
1303
1304 if (val & KVM_XICS_PENDING)
1305 icp_deliver_irq(xics, NULL, irqp->number, false);
1306
1307 return 0;
1308 }
1309
1310 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1311 bool line_status)
1312 {
1313 struct kvmppc_xics *xics = kvm->arch.xics;
1314
1315 if (!xics)
1316 return -ENODEV;
1317 return ics_deliver_irq(xics, irq, level);
1318 }
1319
1320 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
1321 struct kvm *kvm, int irq_source_id,
1322 int level, bool line_status)
1323 {
1324 return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
1325 level, line_status);
1326 }
1327
1328 static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1329 {
1330 struct kvmppc_xics *xics = dev->private;
1331
1332 switch (attr->group) {
1333 case KVM_DEV_XICS_GRP_SOURCES:
1334 return xics_set_source(xics, attr->attr, attr->addr);
1335 }
1336 return -ENXIO;
1337 }
1338
1339 static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1340 {
1341 struct kvmppc_xics *xics = dev->private;
1342
1343 switch (attr->group) {
1344 case KVM_DEV_XICS_GRP_SOURCES:
1345 return xics_get_source(xics, attr->attr, attr->addr);
1346 }
1347 return -ENXIO;
1348 }
1349
1350 static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1351 {
1352 switch (attr->group) {
1353 case KVM_DEV_XICS_GRP_SOURCES:
1354 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1355 attr->attr < KVMPPC_XICS_NR_IRQS)
1356 return 0;
1357 break;
1358 }
1359 return -ENXIO;
1360 }
1361
1362 static void kvmppc_xics_free(struct kvm_device *dev)
1363 {
1364 struct kvmppc_xics *xics = dev->private;
1365 int i;
1366 struct kvm *kvm = xics->kvm;
1367
1368 debugfs_remove(xics->dentry);
1369
1370 if (kvm)
1371 kvm->arch.xics = NULL;
1372
1373 for (i = 0; i <= xics->max_icsid; i++)
1374 kfree(xics->ics[i]);
1375 kfree(xics);
1376 kfree(dev);
1377 }
1378
1379 static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
1380 {
1381 struct kvmppc_xics *xics;
1382 struct kvm *kvm = dev->kvm;
1383 int ret = 0;
1384
1385 xics = kzalloc(sizeof(*xics), GFP_KERNEL);
1386 if (!xics)
1387 return -ENOMEM;
1388
1389 dev->private = xics;
1390 xics->dev = dev;
1391 xics->kvm = kvm;
1392
1393 /* Already there? */
1394 if (kvm->arch.xics)
1395 ret = -EEXIST;
1396 else
1397 kvm->arch.xics = xics;
1398
1399 if (ret) {
1400 kfree(xics);
1401 return ret;
1402 }
1403
1404 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1405 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1406 /* Enable real mode support */
1407 xics->real_mode = ENABLE_REALMODE;
1408 xics->real_mode_dbg = DEBUG_REALMODE;
1409 }
1410 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1411
1412 return 0;
1413 }
1414
1415 static void kvmppc_xics_init(struct kvm_device *dev)
1416 {
1417 struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
1418
1419 xics_debugfs_init(xics);
1420 }
1421
1422 struct kvm_device_ops kvm_xics_ops = {
1423 .name = "kvm-xics",
1424 .create = kvmppc_xics_create,
1425 .init = kvmppc_xics_init,
1426 .destroy = kvmppc_xics_free,
1427 .set_attr = xics_set_attr,
1428 .get_attr = xics_get_attr,
1429 .has_attr = xics_has_attr,
1430 };
1431
1432 int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
1433 u32 xcpu)
1434 {
1435 struct kvmppc_xics *xics = dev->private;
1436 int r = -EBUSY;
1437
1438 if (dev->ops != &kvm_xics_ops)
1439 return -EPERM;
1440 if (xics->kvm != vcpu->kvm)
1441 return -EPERM;
1442 if (vcpu->arch.irq_type)
1443 return -EBUSY;
1444
1445 r = kvmppc_xics_create_icp(vcpu, xcpu);
1446 if (!r)
1447 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1448
1449 return r;
1450 }
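
/*
 * Hypothetical userspace sketch (not part of this file) of how a VMM
 * reaches the code above: KVM_CREATE_DEVICE with KVM_DEV_TYPE_XICS ends
 * up in kvmppc_xics_create(), and enabling KVM_CAP_IRQ_XICS on a vcpu,
 * with the device fd and a server number as arguments, ends up in
 * kvmppc_xics_connect_vcpu(). Error handling elided:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_IRQ_XICS };
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	cap.args[0] = cd.fd;
 *	cap.args[1] = server_num;	(one ICP per vcpu server)
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */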
1451
1452 void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
1453 {
1454 if (!vcpu->arch.icp)
1455 return;
1456 kfree(vcpu->arch.icp);
1457 vcpu->arch.icp = NULL;
1458 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1459 }
1460
1461 static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
1462 struct kvm *kvm, int irq_source_id, int level,
1463 bool line_status)
1464 {
1465 return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
1466 }
1467
1468 int kvm_irq_map_gsi(struct kvm *kvm,
1469 struct kvm_kernel_irq_routing_entry *entries, int gsi)
1470 {
1471 entries->gsi = gsi;
1472 entries->type = KVM_IRQ_ROUTING_IRQCHIP;
1473 entries->set = xics_set_irq;
1474 entries->irqchip.irqchip = 0;
1475 entries->irqchip.pin = gsi;
1476 return 1;
1477 }
1478
1479 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
1480 {
1481 return pin;
1482 }
1483
1484 void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
1485 unsigned long host_irq)
1486 {
1487 struct kvmppc_xics *xics = kvm->arch.xics;
1488 struct kvmppc_ics *ics;
1489 u16 idx;
1490
1491 ics = kvmppc_xics_find_ics(xics, irq, &idx);
1492 if (!ics)
1493 return;
1494
1495 ics->irq_state[idx].host_irq = host_irq;
1496 ics->irq_state[idx].intr_cpu = -1;
1497 }
1498 EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
1499
1500 void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
1501 unsigned long host_irq)
1502 {
1503 struct kvmppc_xics *xics = kvm->arch.xics;
1504 struct kvmppc_ics *ics;
1505 u16 idx;
1506
1507 ics = kvmppc_xics_find_ics(xics, irq, &idx);
1508 if (!ics)
1509 return;
1510
1511 ics->irq_state[idx].host_irq = 0;
1512 }
1513 EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);