arch/powerpc/kvm/book3s_xive_template.c
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

/* File to be included by other .c files */

#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)

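/*
 * Note: this template is compiled more than once. Each including .c file
 * is expected to define X_PFX, X_STATIC, X_STAT_PFX and the __x_* accessors
 * (__x_tima, __x_eoi_page, __x_trig_page, __x_readw, __x_readq, __x_writeb,
 * __x_writeq) before including it, so that the same logic can be
 * instantiated for both a virtual-mode and a real-mode variant of the
 * handlers below.
 */
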
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
{
        u8 cppr;
        u16 ack;

        /*
         * Ensure any previous store to CPPR is ordered vs.
         * the subsequent loads from PIPR or ACK.
         */
        eieio();

        /*
         * DD1 bug workaround: If PIPR is less favored than CPPR
         * ignore the interrupt or we might incorrectly lose an IPB
         * bit.
         */
        if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
                __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
                u8 pipr = be64_to_cpu(qw1) & 0xff;
                if (pipr >= xc->hw_cppr)
                        return;
        }

        /* Perform the acknowledge OS to register cycle. */
        ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

        /* Synchronize subsequent queue accesses */
        mb();

        /* XXX Check grouping level */

        /* Anything ? */
        if (!((ack >> 8) & TM_QW1_NSR_EO))
                return;

        /* Grab CPPR of the most favored pending interrupt */
        cppr = ack & 0xff;
        if (cppr < 8)
                xc->pending |= 1 << cppr;

#ifdef XIVE_RUNTIME_CHECKS
        /* Check consistency */
        if (cppr >= xc->hw_cppr)
                pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
                        smp_processor_id(), cppr, xc->hw_cppr);
#endif

        /*
         * Update our image of the HW CPPR. We don't yet modify
         * xc->cppr, this will be done as we scan for interrupts
         * in the queues.
         */
        xc->hw_cppr = cppr;
}

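/*
 * Read the Event State Buffer (ESB) of a source. "offset" selects one of
 * the ESB special loads (e.g. XIVE_ESB_SET_PQ_00) and the returned byte
 * is the previous PQ state of the source.
 */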
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        val = __x_readq(__x_eoi_page(xd) + offset);
#ifdef __LITTLE_ENDIAN__
        val >>= 64-8;
#endif
        return (u8)val;
}


static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
{
        /* If the XIVE supports the new "store EOI" facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
                __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                opal_int_eoi(hw_irq);
        } else {
                uint64_t eoi_val;

                /*
                 * Otherwise for EOI, we use the special MMIO that does
                 * a clear of both P and Q and returns the old Q,
                 * except for LSIs where we use the "EOI cycle" special
                 * load.
                 *
                 * This allows us to then do a re-trigger if Q was set
                 * rather than synthesizing an interrupt in software.
                 *
                 * For LSIs, using the HW EOI cycle works around a problem
                 * on P9 DD1 PHBs where the other ESB accesses don't work
                 * properly.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
                        __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
                else {
                        eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);

                        /* Re-trigger if needed */
                        if ((eoi_val & 1) && __x_trig_page(xd))
                                __x_writeq(0, __x_trig_page(xd));
                }
        }
}

enum {
        scan_fetch,
        scan_poll,
        scan_eoi,
};

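/*
 * Scan the interrupt queues for the most favored pending interrupt.
 *
 * scan_fetch actually consumes it (updating the queue pointers and EOIing
 * any IPI signal), scan_poll only peeks, and scan_eoi re-evaluates the
 * pending bits after an EOI without adjusting the guest CPPR.
 */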
static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
                                       u8 pending, int scan_type)
{
        u32 hirq = 0;
        u8 prio = 0xff;

        /* Find highest pending priority */
        while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
                struct xive_q *q;
                u32 idx, toggle;
                __be32 *qpage;

                /*
                 * If pending is 0 this will return 0xff which is what
                 * we want
                 */
                prio = ffs(pending) - 1;

                /*
                 * If the most favored prio we found pending is less
                 * favored than (or equal to) a pending IPI, we return
                 * the IPI instead.
                 *
                 * Note: If pending was 0 and mfrr is 0xff, we will
                 * not spuriously take an IPI because mfrr cannot
                 * then be smaller than cppr.
                 */
                if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
                        prio = xc->mfrr;
                        hirq = XICS_IPI;
                        break;
                }

                /* Don't scan past the guest cppr */
                if (prio >= xc->cppr || prio > 7)
                        break;

                /* Grab queue and pointers */
                q = &xc->queues[prio];
                idx = q->idx;
                toggle = q->toggle;

                /*
                 * Snapshot the queue page. The test further down for EOI
                 * must use the same "copy" that was used by __xive_read_eq
                 * since qpage can be set concurrently and we don't want
                 * to miss an EOI.
                 */
                qpage = READ_ONCE(q->qpage);

skip_ipi:
                /*
                 * Try to fetch from the queue. Will return 0 for a
                 * non-queueing priority (i.e. qpage = 0).
                 */
                hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);

                /*
                 * If this was a signal for an MFRR change done by
                 * H_IPI we skip it. Additionally, if we were fetching
                 * we EOI it now, thus re-enabling reception of a new
                 * such signal.
                 *
                 * We also need to do that if prio is 0 and we had no
                 * page for the queue. In this case, we have a non-queued
                 * IPI that needs to be EOId.
                 *
                 * This is safe because if we have another pending MFRR
                 * change that wasn't observed above, the Q bit will have
                 * been set and another occurrence of the IPI will trigger.
                 */
                if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
                        if (scan_type == scan_fetch)
                                GLUE(X_PFX,source_eoi)(xc->vp_ipi,
                                                       &xc->vp_ipi_data);
                        /* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
                        WARN_ON(hirq && hirq != XICS_IPI);
#endif
                        if (hirq)
                                goto skip_ipi;
                }

                /* If fetching, update queue pointers */
                if (scan_type == scan_fetch) {
                        q->idx = idx;
                        q->toggle = toggle;
                }

                /* Something found, stop searching */
                if (hirq)
                        break;

                /* Clear the pending bit on the now empty queue */
                pending &= ~(1 << prio);

                /*
                 * Check if the queue count needs adjusting due to
                 * interrupts being moved away.
                 */
                if (atomic_read(&q->pending_count)) {
                        int p = atomic_xchg(&q->pending_count, 0);
                        if (p) {
#ifdef XIVE_RUNTIME_CHECKS
                                WARN_ON(p > atomic_read(&q->count));
#endif
                                atomic_sub(p, &q->count);
                        }
                }
        }

        /* If we are just taking a "peek", do nothing else */
        if (scan_type == scan_poll)
                return hirq;

        /* Update the pending bits */
        xc->pending = pending;

        /*
         * If this is an EOI that's it, no CPPR adjustment done here,
         * all we needed was to clean up the stale pending bits and
         * check if there's anything left.
         */
        if (scan_type == scan_eoi)
                return hirq;

        /*
         * If we found an interrupt, adjust what the guest CPPR should
         * be as if we had just fetched that interrupt from HW.
         *
         * Note: This can only make xc->cppr smaller as the previous
         * loop will only exit with hirq != 0 if prio is lower than
         * the current xc->cppr. Thus we don't need to re-check xc->mfrr
         * for pending IPIs.
         */
        if (hirq)
                xc->cppr = prio;
        /*
         * If it was an IPI the HW CPPR might have been lowered too much
         * as the HW interrupt we use for IPIs is routed to priority 0.
         *
         * We re-sync it here.
         */
        if (xc->cppr != xc->hw_cppr) {
                xc->hw_cppr = xc->cppr;
                __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
        }

        return hirq;
}

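/*
 * The hcall handlers below implement the XICS interface (H_XIRR, H_IPOLL,
 * H_CPPR, H_EOI, H_IPI) on top of the XIVE hardware, so that guests using
 * the legacy XICS protocol keep working on a XIVE host.
 */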
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 old_cppr;
        u32 hirq;

        pr_devel("H_XIRR\n");

        xc->GLUE(X_STAT_PFX,h_xirr)++;

        /* First collect pending bits from HW */
        GLUE(X_PFX,ack_pending)(xc);

        /*
         * Cleanup the old-style bits if needed (they may have been
         * set by a pull or an escalation interrupt).
         */
        if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
                clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
                          &vcpu->arch.pending_exceptions);

        pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
                 xc->pending, xc->hw_cppr, xc->cppr);

        /* Grab previous CPPR and reverse map it */
        old_cppr = xive_prio_to_guest(xc->cppr);

        /* Scan for actual interrupts */
        hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);

        pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
                 hirq, xc->hw_cppr, xc->cppr);

#ifdef XIVE_RUNTIME_CHECKS
        /* That should never hit */
        if (hirq & 0xff000000)
                pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
#endif

        /*
         * XXX We could check if the interrupt is masked here and
         * filter it. If we chose to do so, we would need to do:
         *
         * if (masked) {
         *        lock();
         *        if (masked) {
         *                old_Q = true;
         *                hirq = 0;
         *        }
         *        unlock();
         * }
         */

        /* Return interrupt and old CPPR in GPR4 */
        vcpu->arch.gpr[4] = hirq | (old_cppr << 24);

        return H_SUCCESS;
}

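/*
 * H_IPOLL: return the pending interrupt (if any) without acknowledging it.
 * When polling on behalf of another server we don't read a TIMA, so all
 * priorities are scanned instead.
 */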
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 pending = xc->pending;
        u32 hirq;

        pr_devel("H_IPOLL(server=%ld)\n", server);

        xc->GLUE(X_STAT_PFX,h_ipoll)++;

        /* Grab the target VCPU if not the current one */
        if (xc->server_num != server) {
                vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
                if (!vcpu)
                        return H_PARAMETER;
                xc = vcpu->arch.xive_vcpu;

                /* Scan all priorities */
                pending = 0xff;
        } else {
                /* Grab pending interrupt if any */
                __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
                u8 pipr = be64_to_cpu(qw1) & 0xff;
                if (pipr < 8)
                        pending |= 1 << pipr;
        }

        hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);

        /* Return interrupt and old CPPR in GPR4 */
        vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);

        return H_SUCCESS;
}

static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
{
        u8 pending, prio;

        pending = xc->pending;
        if (xc->mfrr != 0xff) {
                if (xc->mfrr < 8)
                        pending |= 1 << xc->mfrr;
                else
                        pending |= 0x80;
        }
        if (!pending)
                return;
        prio = ffs(pending) - 1;

        __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
}

X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        u8 old_cppr;

        pr_devel("H_CPPR(cppr=%ld)\n", cppr);

        xc->GLUE(X_STAT_PFX,h_cppr)++;

        /* Map CPPR */
        cppr = xive_prio_from_guest(cppr);

        /* Remember old and update SW state */
        old_cppr = xc->cppr;
        xc->cppr = cppr;

        /*
         * Order the above update of xc->cppr with the subsequent
         * read of xc->mfrr inside push_pending_to_hw()
         */
        smp_mb();

        /*
         * We are masking less, we need to look for pending things
         * to deliver and set VP pending bits accordingly to trigger
         * a new interrupt, otherwise we might miss MFRR changes for
         * which we have optimized out sending an IPI signal.
         */
        if (cppr > old_cppr)
                GLUE(X_PFX,push_pending_to_hw)(xc);

        /* Apply new CPPR */
        xc->hw_cppr = cppr;
        __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);

        return H_SUCCESS;
}

X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_irq_data *xd;
        u8 new_cppr = xirr >> 24;
        u32 irq = xirr & 0x00ffffff, hw_num;
        u16 src;
        int rc = 0;

        pr_devel("H_EOI(xirr=%08lx)\n", xirr);

        xc->GLUE(X_STAT_PFX,h_eoi)++;

        xc->cppr = xive_prio_from_guest(new_cppr);

        /*
         * IPIs are synthesized from MFRR and thus don't need
         * any special EOI handling. The underlying interrupt
         * used to signal MFRR changes is EOId when fetched from
         * the queue.
         */
        if (irq == XICS_IPI || irq == 0) {
                /*
                 * This barrier orders the setting of xc->cppr vs.
                 * subsequent test of xc->mfrr done inside
                 * scan_interrupts and push_pending_to_hw
                 */
                smp_mb();
                goto bail;
        }

        /* Find interrupt source */
        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb) {
                pr_devel(" source not found !\n");
                rc = H_PARAMETER;
                /* Same as above */
                smp_mb();
                goto bail;
        }
        state = &sb->irq_state[src];
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        state->in_eoi = true;

        /*
         * This barrier orders both setting of in_eoi above vs.
         * subsequent test of guest_priority, and the setting
         * of xc->cppr vs. subsequent test of xc->mfrr done inside
         * scan_interrupts and push_pending_to_hw
         */
        smp_mb();

again:
        if (state->guest_priority == MASKED) {
                arch_spin_lock(&sb->lock);
                if (state->guest_priority != MASKED) {
                        arch_spin_unlock(&sb->lock);
                        goto again;
                }
                pr_devel(" EOI on saved P...\n");

                /* Clear old_p, that will cause unmask to perform an EOI */
                state->old_p = false;

                arch_spin_unlock(&sb->lock);
        } else {
                pr_devel(" EOI on source...\n");

                /* Perform EOI on the source */
                GLUE(X_PFX,source_eoi)(hw_num, xd);

                /* If it's an emulated LSI, check level and resend */
                if (state->lsi && state->asserted)
                        __x_writeq(0, __x_trig_page(xd));

        }

        /*
         * This barrier orders the above guest_priority check
         * and spin_lock/unlock with clearing in_eoi below.
         *
         * It also has to be a full mb() as it must ensure
         * the MMIOs done in source_eoi() are completed before
         * state->in_eoi is visible.
         */
        mb();
        state->in_eoi = false;
bail:

        /* Re-evaluate pending IRQs and update HW */
        GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
        GLUE(X_PFX,push_pending_to_hw)(xc);
        pr_devel(" after scan pending=%02x\n", xc->pending);

        /* Apply new CPPR */
        xc->hw_cppr = xc->cppr;
        __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

        return rc;
}

X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
                               unsigned long mfrr)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

        pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

        xc->GLUE(X_STAT_PFX,h_ipi)++;

        /* Find target */
        vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
        if (!vcpu)
                return H_PARAMETER;
        xc = vcpu->arch.xive_vcpu;

        /* Locklessly write over MFRR */
        xc->mfrr = mfrr;

        /*
         * The load of xc->cppr below and the subsequent MMIO store
         * to the IPI must happen after the above mfrr update is
         * globally visible so that:
         *
         * - We synchronize with another CPU doing an H_EOI or a H_CPPR
         *   updating xc->cppr then reading xc->mfrr.
         *
         * - The target of the IPI sees the xc->mfrr update
         */
        mb();

        /* Shoot the IPI if more favored than the target cppr */
        if (mfrr < xc->cppr)
                __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

        return H_SUCCESS;
}