/* arch/powerpc/kvm/book3s_xics.c */

/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

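/*
 * For reference, the ICS locking idiom used throughout this file looks
 * roughly like the following (illustrative sketch only, mirroring the
 * code below rather than adding anything new):
 *
 *	local_irq_save(flags);
 *	arch_spin_lock(&ics->lock);
 *	... inspect or update ics->irq_state[src] ...
 *	arch_spin_unlock(&ics->lock);
 *	local_irq_restore(flags);
 *
 * ICP state, by contrast, is never locked; it is updated as a single
 * atomic transaction via icp_try_update() below.
 */
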
/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, thus concurrent access is undefined
	 * to begin with.
	 */
	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return 0;
}

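/*
 * Note on callers (sketch, based on the routing code at the bottom of this
 * file): the irq routing entries set up there point at kvm_set_irq() /
 * xics_set_irq(), which funnel into ics_deliver_irq() with "level" carrying
 * either a plain 0/1 or one of KVM_INTERRUPT_SET_LEVEL / KVM_INTERRUPT_UNSET,
 * hence the dual checks above.
 */
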
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

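/*
 * Illustrative sketch of how the callers below use icp_try_update()
 * (this mirrors the existing code, it is not an additional API):
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... derive new_state from old_state ...
 *	} while (!icp_try_update(icp, old_state, new_state, change_self));
 *
 * A failed cmpxchg64() simply means another update raced with us, so the
 * transaction is recomputed from the freshly observed state.
 */
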
static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate between normal deliveries and resends;
	 * this implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

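/*
 * Layout of the XIRR value returned above and consumed by H_EOI below
 * (illustration only, derived from the shifts and masks already used in
 * this file):
 *
 *	 31      24 23                     0
 *	+----------+------------------------+
 *	|   CPPR   |          XISR          |
 *	+----------+------------------------+
 *
 * i.e. xirr = (cppr << 24) | xisr, and kvmppc_h_eoi() recovers the pieces
 * with (xirr >> 24) and (xirr & 0x00ffffff).
 */
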
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_REJECT) {
		icp->n_rm_reject++;
		icp_deliver_irq(xics, icp, icp->rm_reject);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);


/* -- Initialisation code etc. -- */

static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_reject, t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_rm_reject = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_rm_reject += icp->n_rm_reject;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu reject=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_reject, t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

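/*
 * The u64 produced above (and consumed by kvmppc_xics_set_icp() below) packs
 * the ICP state using the KVM_REG_PPC_ICP_*_SHIFT constants from the uapi
 * headers; roughly (sketch, see asm/kvm.h for the authoritative layout):
 *
 *	icpval = (cppr        << KVM_REG_PPC_ICP_CPPR_SHIFT) |
 *		 (xisr        << KVM_REG_PPC_ICP_XISR_SHIFT) |
 *		 (mfrr        << KVM_REG_PPC_ICP_MFRR_SHIFT) |
 *		 (pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
 *
 * Userspace typically reads/writes it through the KVM_REG_PPC_ICP_STATE
 * one-reg, which is how the per-vcpu presentation controller state is saved
 * and restored across migration.
 */
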
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

5975a2e0
PM
1173static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
1174{
1175 int ret;
1176 struct kvmppc_ics *ics;
1177 struct ics_irq_state *irqp;
1178 u64 __user *ubufp = (u64 __user *) addr;
1179 u16 idx;
1180 u64 val, prio;
34cb7954 1181 unsigned long flags;
5975a2e0
PM
1182
1183 ics = kvmppc_xics_find_ics(xics, irq, &idx);
1184 if (!ics)
1185 return -ENOENT;
bc5ad3f3 1186
5975a2e0 1187 irqp = &ics->irq_state[idx];
34cb7954
SW
1188 local_irq_save(flags);
1189 arch_spin_lock(&ics->lock);
5975a2e0
PM
1190 ret = -ENOENT;
1191 if (irqp->exists) {
1192 val = irqp->server;
1193 prio = irqp->priority;
1194 if (prio == MASKED) {
1195 val |= KVM_XICS_MASKED;
1196 prio = irqp->saved_priority;
1197 }
1198 val |= prio << KVM_XICS_PRIORITY_SHIFT;
b1a4286b
PM
1199 if (irqp->lsi) {
1200 val |= KVM_XICS_LEVEL_SENSITIVE;
1201 if (irqp->asserted)
1202 val |= KVM_XICS_PENDING;
1203 } else if (irqp->masked_pending || irqp->resend)
5975a2e0
PM
1204 val |= KVM_XICS_PENDING;
1205 ret = 0;
1206 }
34cb7954
SW
1207 arch_spin_unlock(&ics->lock);
1208 local_irq_restore(flags);
5975a2e0
PM
1209
1210 if (!ret && put_user(val, ubufp))
1211 ret = -EFAULT;
1212
1213 return ret;
1214}
1215
1216static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
bc5ad3f3 1217{
5975a2e0
PM
1218 struct kvmppc_ics *ics;
1219 struct ics_irq_state *irqp;
1220 u64 __user *ubufp = (u64 __user *) addr;
1221 u16 idx;
1222 u64 val;
1223 u8 prio;
1224 u32 server;
34cb7954 1225 unsigned long flags;
5975a2e0
PM
1226
1227 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1228 return -ENOENT;
1229
1230 ics = kvmppc_xics_find_ics(xics, irq, &idx);
1231 if (!ics) {
1232 ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
1233 if (!ics)
1234 return -ENOMEM;
1235 }
1236 irqp = &ics->irq_state[idx];
1237 if (get_user(val, ubufp))
1238 return -EFAULT;
1239
1240 server = val & KVM_XICS_DESTINATION_MASK;
1241 prio = val >> KVM_XICS_PRIORITY_SHIFT;
1242 if (prio != MASKED &&
1243 kvmppc_xics_find_server(xics->kvm, server) == NULL)
1244 return -EINVAL;
bc5ad3f3 1245
34cb7954
SW
1246 local_irq_save(flags);
1247 arch_spin_lock(&ics->lock);
5975a2e0
PM
1248 irqp->server = server;
1249 irqp->saved_priority = prio;
1250 if (val & KVM_XICS_MASKED)
1251 prio = MASKED;
1252 irqp->priority = prio;
1253 irqp->resend = 0;
1254 irqp->masked_pending = 0;
b1a4286b 1255 irqp->lsi = 0;
5975a2e0 1256 irqp->asserted = 0;
b1a4286b
PM
1257 if (val & KVM_XICS_LEVEL_SENSITIVE) {
1258 irqp->lsi = 1;
1259 if (val & KVM_XICS_PENDING)
1260 irqp->asserted = 1;
1261 }
5975a2e0 1262 irqp->exists = 1;
34cb7954
SW
1263 arch_spin_unlock(&ics->lock);
1264 local_irq_restore(flags);
bc5ad3f3 1265
5975a2e0
PM
1266 if (val & KVM_XICS_PENDING)
1267 icp_deliver_irq(xics, NULL, irqp->number);
bc5ad3f3 1268
5975a2e0
PM
1269 return 0;
1270}
1271
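/*
 * For reference, the u64 exchanged with userspace above encodes one
 * interrupt source roughly as (sketch only, see the KVM_XICS_* definitions
 * in the uapi headers for the authoritative layout):
 *
 *	val = server					(destination)
 *	    | (priority << KVM_XICS_PRIORITY_SHIFT)
 *	    | KVM_XICS_MASKED / KVM_XICS_LEVEL_SENSITIVE / KVM_XICS_PENDING
 *
 * Userspace typically drives this through KVM_GET_DEVICE_ATTR /
 * KVM_SET_DEVICE_ATTR on the xics device fd, with attr->group set to
 * KVM_DEV_XICS_GRP_SOURCES, attr->attr set to the interrupt number and
 * attr->addr pointing at the u64, e.g. when saving and restoring interrupt
 * source state across migration.
 */
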
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}

5975a2e0 1341static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
bc5ad3f3
BH
1342{
1343 struct kvmppc_xics *xics;
5975a2e0 1344 struct kvm *kvm = dev->kvm;
bc5ad3f3
BH
1345 int ret = 0;
1346
1347 xics = kzalloc(sizeof(*xics), GFP_KERNEL);
1348 if (!xics)
1349 return -ENOMEM;
1350
5975a2e0
PM
1351 dev->private = xics;
1352 xics->dev = dev;
bc5ad3f3
BH
1353 xics->kvm = kvm;
1354
1355 /* Already there ? */
bc5ad3f3
BH
1356 if (kvm->arch.xics)
1357 ret = -EEXIST;
1358 else
1359 kvm->arch.xics = xics;
bc5ad3f3 1360
458ff3c0
GN
1361 if (ret) {
1362 kfree(xics);
bc5ad3f3 1363 return ret;
458ff3c0 1364 }
bc5ad3f3 1365
3a167bea 1366#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
e7d26f28
BH
1367 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
1368 /* Enable real mode support */
1369 xics->real_mode = ENABLE_REALMODE;
1370 xics->real_mode_dbg = DEBUG_REALMODE;
1371 }
3a167bea 1372#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
e7d26f28 1373
bc5ad3f3
BH
1374 return 0;
1375}
1376
023e9fdd
CD
1377static void kvmppc_xics_init(struct kvm_device *dev)
1378{
1379 struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
1380
1381 xics_debugfs_init(xics);
1382}
1383
5975a2e0
PM
1384struct kvm_device_ops kvm_xics_ops = {
1385 .name = "kvm-xics",
1386 .create = kvmppc_xics_create,
023e9fdd 1387 .init = kvmppc_xics_init,
5975a2e0
PM
1388 .destroy = kvmppc_xics_free,
1389 .set_attr = xics_set_attr,
1390 .get_attr = xics_get_attr,
1391 .has_attr = xics_has_attr,
1392};
1393
1394int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
1395 u32 xcpu)
1396{
1397 struct kvmppc_xics *xics = dev->private;
1398 int r = -EBUSY;
1399
1400 if (dev->ops != &kvm_xics_ops)
1401 return -EPERM;
1402 if (xics->kvm != vcpu->kvm)
1403 return -EPERM;
1404 if (vcpu->arch.irq_type)
1405 return -EBUSY;
1406
1407 r = kvmppc_xics_create_icp(vcpu, xcpu);
1408 if (!r)
1409 vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1410
1411 return r;
1412}
1413
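/*
 * Typical setup sequence from userspace (sketch; the vcpu hook-up path lives
 * in the generic powerpc KVM code rather than in this file):
 *
 *	1. KVM_CREATE_DEVICE with type KVM_DEV_TYPE_XICS, which ends up in
 *	   kvmppc_xics_create() above and yields a device fd.
 *	2. For each vcpu, enable KVM_CAP_IRQ_XICS with that device fd and a
 *	   server number; this reaches kvmppc_xics_connect_vcpu() above.
 *	3. Interrupt sources are then configured through the
 *	   KVM_DEV_XICS_GRP_SOURCES attributes handled earlier in this file.
 */
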
bc5ad3f3
BH
1414void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
1415{
1416 if (!vcpu->arch.icp)
1417 return;
1418 kfree(vcpu->arch.icp);
1419 vcpu->arch.icp = NULL;
1420 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1421}
25a2150b
PM
1422
1423static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
1424 struct kvm *kvm, int irq_source_id, int level,
1425 bool line_status)
1426{
1427 return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
1428}
1429
1430int kvm_irq_map_gsi(struct kvm *kvm,
1431 struct kvm_kernel_irq_routing_entry *entries, int gsi)
1432{
1433 entries->gsi = gsi;
1434 entries->type = KVM_IRQ_ROUTING_IRQCHIP;
1435 entries->set = xics_set_irq;
1436 entries->irqchip.irqchip = 0;
1437 entries->irqchip.pin = gsi;
1438 return 1;
1439}
1440
1441int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
1442{
1443 return pin;
1444}
5d375199
PM
1445
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);