arch/powerpc/kvm/book3s_hv_rm_xics.c
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

#ifdef CONFIG_SMP
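/*
 * Post a PPC_MSG_RM_HOST_ACTION message to the first thread of host core
 * 'hcore', passing 'vcpu' through rm_data. The IPI is raised directly via
 * the target's real-mode XICS address when it is known, and through OPAL
 * otherwise.
 */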
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	if (paca[hcpu].kvm_hstate.xics_phys)
		icp_native_cause_ipi_rm(hcpu);
	else
		opal_rm_int_set_mfrr(get_hard_smp_processor_id(hcpu),
				     IPI_PRIORITY);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, could consider using a fairer algorithm (one
 * that distributes the IPIs better)
 *
 * Returns -1 if no core could be found in the host.
 * Else, returns a core ID which has been reserved for use.
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
				    old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

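/*
 * Flag an external interrupt as pending for the target vcpu and kick it:
 * if the target is the calling vcpu we just set LPCR[MER]; if its core is
 * not currently loaded we either redirect the kick to a free host core or
 * queue an XICS_RM_KICK_VCPU action to be handled in virtual mode;
 * otherwise we IPI the hardware thread the vcpu is running on.
 */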
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded,
	 * if not, find an available host core to post to wake the VCPU,
	 * if we can't find one, set up state so that we eventually
	 * return H_TOO_HARD.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

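/*
 * Try to commit a new ICP state with a single cmpxchg. On success, raise
 * the external interrupt on the target vcpu if the new state calls for
 * it. Returns false if another update raced with us and the caller must
 * retry.
 */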
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}

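/*
 * Return H_TOO_HARD if anything has to be completed in virtual mode
 * (real-mode debug trigger or a pending rm_action), H_SUCCESS otherwise.
 */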
static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

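/*
 * Walk this ICP's resend_map and have every ICS flagged in it retry
 * delivery of its interrupts that are marked for resend.
 */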
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

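/*
 * Try to make 'irq' the pending interrupt on this ICP. Delivery succeeds
 * only if 'priority' is more favored than the CPPR, the MFRR and the
 * currently pending priority; in that case *reject returns whatever
 * interrupt was displaced (0 if none). On failure, need_resend is set so
 * a later CPPR change retries the delivery, and false is returned.
 */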
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			  new_state.mfrr > priority &&
			  new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

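/*
 * Deliver (or re-deliver after a rejection) interrupt 'new_irq' to the
 * ICP that serves it, holding the ICS lock while the source state is
 * examined and updated. Handles masked sources, retargeted interrupts,
 * rejection of a less favored pending interrupt, and the resend
 * bookkeeping when the delivery cannot be made right now.
 */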
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends; this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set
		 * the resend map bit and mark the ICS state as needing
		 * a resend.
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between the icp_rm_try_to_deliver() atomic update and now,
		 * then we know it might have missed the resend_map bit. So
		 * we retry.
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq;
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent.
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (i.e. bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}

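/*
 * Real-mode handler for the H_XIRR hypercall: accept the pending
 * interrupt, returning the XIRR (XISR | CPPR << 24) in GPR4 and raising
 * the CPPR to the accepted interrupt's priority.
 */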
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

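/*
 * Real-mode handler for the H_IPI hypercall: set the target server's MFRR
 * and, depending on how it compares with the CPPR and the pending
 * priority, either make an IPI pending (rejecting the interrupt it
 * displaces) or trigger a resend when the MFRR is made less favored.
 */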
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

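/*
 * Real-mode handler for the H_CPPR hypercall: change the processor's
 * current priority. Moving to a less favored value (Down_CPPR) may let
 * deferred interrupts through, so resends are checked; moving to a more
 * favored value (Up_CPPR) may reject the currently pending interrupt,
 * which is then re-delivered.
 */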
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor itself.
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority; this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor; the update
	 * transaction will set it again if needed.
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
 bail:
	return check_too_hard(xics, icp);
}

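/*
 * Real-mode handler for the H_EOI hypercall: lower the CPPR using the
 * priority in the XIRR value, resend the source if it is still asserted,
 * queue an EOI notification for virtual mode when irq ack notifiers are
 * registered, and retarget a passed-through host interrupt if it last
 * fired on a different core.
 */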
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (i.e. make it more favored), we do not check for
	 * rejection of a pending interrupt; this is a SW error and
	 * PAPR specifies that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_rm_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}
 bail:
	return check_too_hard(xics, icp);
}

unsigned long eoi_rc;

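/*
 * EOI a passed-through interrupt in real mode: have OPAL perform the MSI
 * EOI for the source, then write the XIRR back to the XICS, either
 * directly through its real-mode MMIO address or via OPAL. *again is set
 * when the OPAL EOI call returns a positive value, telling the caller to
 * go around again.
 */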
static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
{
	unsigned long xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	/* EOI it */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (xics_phys) {
		_stwcix(xics_phys + XICS_XIRR, xirr);
	} else {
		rc = opal_rm_int_eoi(be32_to_cpu(xirr));
		*again = rc > 0;
	}
}

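/*
 * Retarget a host interrupt to 'server_cpu' via OPAL. The hardware CPU
 * number is shifted left by 2 to form the server number that
 * opal_rm_set_xive() expects.
 */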
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}

/*
 * Increment a per-CPU 32-bit unsigned integer variable.
 * Safe to call in real mode. Handles vmalloc'ed addresses.
 *
 * ToDo: Make this work for any integral type
 */

static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;

	if (REGION_ID(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

/*
 * We don't try to update the flags in the irq_desc 'istate' field in
 * here as would happen in the normal IRQ handling path for several reasons:
 *  - state flags represent internal IRQ state and are not expected to be
 *    updated outside the IRQ subsystem
 *  - more importantly, these are useful for edge-triggered interrupts,
 *    IRQ probing, etc., but we are only handling MSI/MSI-X interrupts here
 *    and these states shouldn't apply to us.
 *
 * However, we do update irq_stats - we somewhat duplicate the code in
 * kstat_incr_irqs_this_cpu() for this since this function is defined
 * in irq/internal.h which we don't want to include here.
 * The only difference is that desc->kstat_irqs is an allocated per-CPU
 * variable and could have been vmalloc'ed, so we can't directly
 * call __this_cpu_inc() on it. The kstat structure is a static
 * per-CPU variable and it should be accessible by real-mode KVM.
 */
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

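/*
 * Deliver a host interrupt that is mapped directly to a guest interrupt:
 * bump the host interrupt statistics, inject the virtual irq into the
 * guest's ICP and EOI the host side. Returns 2 if check_too_hard() says
 * the rest of the work must be finished in virtual mode, -2 otherwise.
 */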
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 __be32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap,
				 bool *again)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	u32 irq;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);
	icp_rm_deliver_irq(xics, icp, irq);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
		again);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

/* --- Non-real mode XICS-related built-in routines --- */

/*
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

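/*
 * IPI handler run in host context when real-mode KVM posts a
 * PPC_MSG_RM_HOST_ACTION message: look up this core's entry in the
 * rm_core array and carry out the requested action (currently only
 * kicking a vcpu), then clear the request.
 */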
void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
				   rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}