/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima xive_tima
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
#define __x_writeb __raw_writeb
#define __x_readw __raw_readw
#define __x_readq __raw_readq
#define __x_writeq __raw_writeq

#include "book3s_xive_template.c"
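
/*
 * Note (editorial sketch of the mechanism, not from the original
 * source): the X_* defines above parameterize the shared template
 * just included. Assuming the template glues X_PFX onto each entry
 * point, X_PFX = xive_vm_ makes it emit e.g. a static
 * xive_vm_h_xirr(), with the __x_* accessors routing TIMA/ESB MMIO
 * through the __raw_* helpers.
 */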

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP 2
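
/*
 * Worked example (editorial sketch, not in the original source): with
 * a single 4K queue page of 4-byte entries, a queue holds 1024 slots
 * and q->msk is 1023, so xive_try_pick_queue() below admits at most
 * (q->msk + 1) - XIVE_Q_GAP = 1022 routed interrupts, keeping two
 * slots in reserve for the IPI and as a safety margin.
 */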

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
        /* This should be only for MSIs */
        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
                return false;

        /* Those interrupts should always have a trigger page */
        if (WARN_ON(!xd->trig_mmio))
                return false;

        out_be64(xd->trig_mmio, 0);

        return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
        struct kvm_vcpu *vcpu = data;

        /* We use the existing H_PROD mechanism to wake up the target */
        vcpu->arch.prodded = 1;
        smp_mb();
        if (vcpu->arch.ceded)
                kvmppc_fast_vcpu_kick(vcpu);

        return IRQ_HANDLED;
}

static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q = &xc->queues[prio];
        char *name = NULL;
        int rc;

        /* Already there ? */
        if (xc->esc_virq[prio])
                return 0;

        /* Hook up the escalation interrupt */
        xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
        if (!xc->esc_virq[prio]) {
                pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                return -EIO;
        }

        /*
         * Future improvement: start with them disabled
         * and handle DD2 and later scheme of merged escalation
         * interrupts
         */
        name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
                         vcpu->kvm->arch.lpid, xc->server_num, prio);
        if (!name) {
                pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                rc = -ENOMEM;
                goto error;
        }
        rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
                         IRQF_NO_THREAD, name, vcpu);
        if (rc) {
                pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
                       prio, xc->server_num);
                goto error;
        }
        xc->esc_virq_names[prio] = name;
        return 0;
error:
        irq_dispose_mapping(xc->esc_virq[prio]);
        xc->esc_virq[prio] = 0;
        kfree(name);
        return rc;
}
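
/*
 * Note (editorial): the name built with kasprintf() above is stashed
 * in esc_virq_names[] because request_irq() keeps a pointer to it
 * rather than a copy; it must stay allocated until the escalation IRQ
 * is freed in kvmppc_xive_cleanup_vcpu().
 */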

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = xc->xive;
        struct xive_q *q = &xc->queues[prio];
        void *qpage;
        int rc;

        if (WARN_ON(q->qpage))
                return 0;

        /* Allocate the queue and retrieve info on current node for now */
        qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
        if (!qpage) {
                pr_err("Failed to allocate queue %d for VCPU %d\n",
                       prio, xc->server_num);
                return -ENOMEM;
        }
        memset(qpage, 0, 1 << xive->q_order);

        /*
         * Reconfigure the queue. This will set q->qpage only once the
         * queue is fully configured. This is a requirement for prio 0
         * as we will stop doing EOIs for every IPI as soon as we observe
         * qpage being non-NULL, and instead will only EOI when we receive
         * corresponding queue 0 entries
         */
        rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
                                         xive->q_order, true);
        if (rc)
                pr_err("Failed to configure queue %d for VCPU %d\n",
                       prio, xc->server_num);
        return rc;
}

/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvm_vcpu *vcpu;
        int i, rc;

        lockdep_assert_held(&kvm->lock);

        /* Already provisioned ? */
        if (xive->qmap & (1 << prio))
                return 0;

        pr_devel("Provisioning prio... %d\n", prio);

        /* Provision each VCPU and enable escalations */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!vcpu->arch.xive_vcpu)
                        continue;
                rc = xive_provision_queue(vcpu, prio);
                if (rc == 0)
                        xive_attach_escalation(vcpu, prio);
                if (rc)
                        return rc;
        }

        /* Order previous stores and mark it as provisioned */
        mb();
        xive->qmap |= (1 << prio);
        return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
        struct kvm_vcpu *vcpu;
        struct kvmppc_xive_vcpu *xc;
        struct xive_q *q;

        /* Locate target server */
        vcpu = kvmppc_xive_find_server(kvm, server);
        if (!vcpu) {
                pr_warn("%s: Can't find server %d\n", __func__, server);
                return;
        }
        xc = vcpu->arch.xive_vcpu;
        if (WARN_ON(!xc))
                return;

        q = &xc->queues[prio];
        atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct xive_q *q;
        u32 max;

        if (WARN_ON(!xc))
                return -ENXIO;
        if (!xc->valid)
                return -ENXIO;

        q = &xc->queues[prio];
        if (WARN_ON(!q->qpage))
                return -ENXIO;

        /* Calculate max number of interrupts in that queue. */
        max = (q->msk + 1) - XIVE_Q_GAP;
        return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
        struct kvm_vcpu *vcpu;
        int i, rc;

        /* Locate target server */
        vcpu = kvmppc_xive_find_server(kvm, *server);
        if (!vcpu) {
                pr_devel("Can't find server %d\n", *server);
                return -EINVAL;
        }

        pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

        /* Try pick it */
        rc = xive_try_pick_queue(vcpu, prio);
        if (rc == 0)
                return rc;

        pr_devel(" .. failed, looking up candidate...\n");

        /* Failed, pick another VCPU */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!vcpu->arch.xive_vcpu)
                        continue;
                rc = xive_try_pick_queue(vcpu, prio);
                if (rc == 0) {
                        *server = vcpu->arch.xive_vcpu->server_num;
                        pr_devel(" found on 0x%x/%d\n", *server, prio);
                        return rc;
                }
        }
        pr_devel(" no available target !\n");

        /* No available target ! */
        return -EBUSY;
}
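
/*
 * Note (editorial): the fallback above has no load balancing; it
 * simply walks the vCPUs in index order and takes the first one whose
 * queue has room, updating *server so the caller retargets the
 * interrupt accordingly.
 */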

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
                             struct kvmppc_xive_src_block *sb,
                             struct kvmppc_xive_irq_state *state)
{
        struct xive_irq_data *xd;
        u32 hw_num;
        u8 old_prio;
        u64 val;

        /*
         * Take the lock, set masked, try again if racing
         * with H_EOI
         */
        for (;;) {
                arch_spin_lock(&sb->lock);
                old_prio = state->guest_priority;
                state->guest_priority = MASKED;
                mb();
                if (!state->in_eoi)
                        break;
                state->guest_priority = old_prio;
                arch_spin_unlock(&sb->lock);
        }

        /* No change ? Bail */
        if (old_prio == MASKED)
                return old_prio;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        /*
         * If the interrupt is marked as needing masking via
         * firmware, we do it here. Firmware masking however
         * is "lossy", it won't return the old p and q bits
         * and won't set the interrupt to a state where it will
         * record queued ones. If this is an issue we should do
         * lazy masking instead.
         *
         * For now, we work around this in unmask by forcing
         * an interrupt whenever we unmask a non-LSI via FW
         * (if ever).
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
                                          xive->vp_base + state->act_server,
                                          MASKED, state->number);
                /* set old_p so we can track if an H_EOI was done */
                state->old_p = true;
                state->old_q = false;
        } else {
                /* Set PQ to 10, return old P and old Q and remember them */
                val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
                state->old_p = !!(val & 2);
                state->old_q = !!(val & 1);

                /*
                 * Synchronize hardware to ensure the queues are updated
                 * when masking
                 */
                xive_native_sync_source(hw_num);
        }

        return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
                                 struct kvmppc_xive_irq_state *state)
{
        /*
         * Take the lock, try again if racing with H_EOI
         */
        for (;;) {
                arch_spin_lock(&sb->lock);
                if (!state->in_eoi)
                        break;
                arch_spin_unlock(&sb->lock);
        }
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
                               struct kvmppc_xive_src_block *sb,
                               struct kvmppc_xive_irq_state *state,
                               u8 prio)
{
        struct xive_irq_data *xd;
        u32 hw_num;

        /* If we aren't changing a thing, move on */
        if (state->guest_priority != MASKED)
                goto bail;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        /*
         * See comment in xive_lock_and_mask() concerning masking
         * via firmware.
         */
        if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
                xive_native_configure_irq(hw_num,
                                          xive->vp_base + state->act_server,
                                          state->act_priority, state->number);
                /* If an EOI is needed, do it here */
                if (!state->old_p)
                        xive_vm_source_eoi(hw_num, xd);
                /* If this is not an LSI, force a trigger */
                if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
                        xive_irq_trigger(xd);
                goto bail;
        }

        /* Old Q set, set PQ to 11 */
        if (state->old_q)
                xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

        /*
         * If not old P, then perform an "effective" EOI
         * on the source. This will handle the cases where
         * FW EOI is needed.
         */
        if (!state->old_p)
                xive_vm_source_eoi(hw_num, xd);

        /* Synchronize ordering and mark unmasked */
        mb();
bail:
        state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fall back
 * to another server if necessary and perform the HW targeting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
                                 struct kvmppc_xive_irq_state *state,
                                 u32 server, u8 prio)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        u32 hw_num;
        int rc;

        /*
         * This will return a tentative server and actual
         * priority. The count for that new target will have
         * already been incremented.
         */
        rc = xive_select_target(kvm, &server, prio);

        /*
         * We failed to find a target ? Not much we can do
         * at least until we support the GIQ.
         */
        if (rc)
                return rc;

        /*
         * Increment the old queue pending count if there
         * was one so that the old queue count gets adjusted later
         * when observed to be empty.
         */
        if (state->act_priority != MASKED)
                xive_inc_q_pending(kvm,
                                   state->act_server,
                                   state->act_priority);
        /*
         * Update state and HW
         */
        state->act_priority = prio;
        state->act_server = server;

        /* Get the right irq */
        kvmppc_xive_select_irq(state, &hw_num, NULL);

        return xive_native_configure_irq(hw_num,
                                         xive->vp_base + server,
                                         prio, state->number);
}

/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */

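/*
 * Worked example of the rules above (editorial, illustrative only):
 * take an MSI that is unmasked and idle, so its ESB sits at PQ = 00.
 * The guest masks it: we do SET_PQ_10 and save old P = 0, Q = 0. The
 * device then fires while masked: the ESB moves to 11, Q recording
 * the event. On unmask, saved Q is clear so the HW state (11) is
 * kept, and since saved P is clear we perform the effective EOI,
 * which re-arms the source and shoots a replacement interrupt
 * because Q was set in HW.
 */
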
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
                         u32 priority)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u8 new_act_prio;
        int rc = 0;
        u16 idx;

        if (!xive)
                return -ENODEV;

        pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
                 irq, server, priority);

        /* First, check provisioning of queues */
        if (priority != MASKED)
                rc = xive_check_provisioning(xive->kvm,
                                             xive_prio_from_guest(priority));
        if (rc) {
                pr_devel(" provisioning failure %d !\n", rc);
                return rc;
        }

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * We first handle masking/unmasking since the locking
         * might need to be retried due to EOIs, we'll handle
         * targeting changes later. These functions will return
         * with the SB lock held.
         *
         * xive_lock_and_mask() will also set state->guest_priority
         * but won't otherwise change other fields of the state.
         *
         * xive_lock_for_unmask will not actually unmask, this will
         * be done later by xive_finish_unmask() once the targeting
         * has been done, so we don't try to unmask an interrupt
         * that hasn't yet been targeted.
         */
        if (priority == MASKED)
                xive_lock_and_mask(xive, sb, state);
        else
                xive_lock_for_unmask(sb, state);

        /*
         * Then we handle targeting.
         *
         * First calculate a new "actual priority"
         */
        new_act_prio = state->act_priority;
        if (priority != MASKED)
                new_act_prio = xive_prio_from_guest(priority);

        pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
                 new_act_prio, state->act_server, state->act_priority);

        /*
         * Then check if we actually need to change anything.
         *
         * The condition for re-targeting the interrupt is that
         * we have a valid new priority (new_act_prio is not 0xff)
         * and either the server or the priority changed.
         *
         * Note: If act_priority was ff and the new priority is
         * also ff, we don't do anything and leave the interrupt
         * untargeted. An attempt of doing an int_on on an
         * untargeted interrupt will fail. If that is a problem
         * we could initialize interrupts with valid defaults.
         */

        if (new_act_prio != MASKED &&
            (state->act_server != server ||
             state->act_priority != new_act_prio))
                rc = xive_target_interrupt(kvm, state, server, new_act_prio);

        /*
         * Perform the final unmasking of the interrupt source
         * if necessary
         */
        if (priority != MASKED)
                xive_finish_unmask(xive, sb, state, priority);

        /*
         * Finally update saved_priority to match. Only int_on/off
         * set this field to a different value.
         */
        state->saved_priority = priority;

        arch_spin_unlock(&sb->lock);
        return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                         u32 *priority)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];
        arch_spin_lock(&sb->lock);
        *server = state->act_server;
        *priority = state->guest_priority;
        arch_spin_unlock(&sb->lock);

        return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        pr_devel("int_on(irq=0x%x)\n", irq);

        /*
         * Check if interrupt was not targeted
         */
        if (state->act_priority == MASKED) {
                pr_devel("int_on on untargeted interrupt\n");
                return -EINVAL;
        }

        /* If saved_priority is 0xff, do nothing */
        if (state->saved_priority == MASKED)
                return 0;

        /*
         * Lock and unmask it.
         */
        xive_lock_for_unmask(sb, state);
        xive_finish_unmask(xive, sb, state, state->saved_priority);
        arch_spin_unlock(&sb->lock);

        return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        pr_devel("int_off(irq=0x%x)\n", irq);

        /*
         * Lock and mask
         */
        state->saved_priority = xive_lock_and_mask(xive, sb, state);
        arch_spin_unlock(&sb->lock);

        return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return false;
        state = &sb->irq_state[idx];
        if (!state->valid)
                return false;

        /*
         * Trigger the IPI. This assumes we never restore a pass-through
         * interrupt which should be safe enough
         */
        xive_irq_trigger(&state->ipi_data);

        return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

        if (!xc)
                return 0;

        /* Return the per-cpu state for state saving/migration */
        return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
               (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
}
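
/*
 * Layout note (editorial sketch; the authoritative definitions are
 * the KVM_REG_PPC_ICP_* macros in the uapi header): the 64-bit ICP
 * value carries CPPR in the top byte, XISR below it, then an MFRR
 * byte and a pending-priority byte. Only CPPR and MFRR matter here
 * since XIVE has no one-entry XIRR "queue" to save; a non-zero XISR
 * is reconstructed at restore time by kvmppc_xive_set_icp() below.
 */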

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
        u8 cppr, mfrr;
        u32 xisr;

        if (!xc || !xive)
                return -ENOENT;

        /* Grab individual state fields. We don't use pending_pri */
        cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
        xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
                KVM_REG_PPC_ICP_XISR_MASK;
        mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

        pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
                 xc->server_num, cppr, mfrr, xisr);

        /*
         * We can't update the state of a "pushed" VCPU, but that
         * shouldn't happen.
         */
        if (WARN_ON(vcpu->arch.xive_pushed))
                return -EIO;

        /* Update VCPU HW saved state */
        vcpu->arch.xive_saved_state.cppr = cppr;
        xc->hw_cppr = xc->cppr = cppr;

        /*
         * Update MFRR state. If it's not 0xff, we mark the VCPU as
         * having a pending MFRR change, which will re-evaluate the
         * target. The VCPU will thus potentially get a spurious
         * interrupt but that's not a big deal.
         */
        xc->mfrr = mfrr;
        if (mfrr < cppr)
                xive_irq_trigger(&xc->vp_ipi_data);

        /*
         * Now saved XIRR is "interesting". It means there's something in
         * the legacy "1 element" queue... for an IPI we simply ignore it,
         * as the MFRR restore will handle that. For anything else we need
         * to force a resend of the source.
         * However the source may not have been set up yet. If that's the
         * case, we keep that info and increment a counter in the xive to
         * tell subsequent xive_set_source() to go look.
         */
        if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
                xc->delayed_irq = xisr;
                xive->delayed_irqs++;
                pr_devel(" xisr restore delayed\n");
        }

        return 0;
}
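
/*
 * Note (editorial): the delayed_irq stash above is consumed by
 * xive_check_delayed_irq(), called from xive_set_source() when the
 * source eventually gets created; it then forces KVM_XICS_PENDING so
 * the interrupt is re-triggered instead of being lost.
 */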

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                           struct irq_desc *host_desc)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
        unsigned int host_irq = irq_desc_get_irq(host_desc);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
        u16 idx;
        u8 prio;
        int rc;

        if (!xive)
                return -ENODEV;

        pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",
                 guest_irq, hw_irq);

        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * Mark the passed-through interrupt as going to a VCPU,
         * this will prevent further EOIs and similar operations
         * from the XIVE code. It will also mask the interrupt
         * to either PQ=10 or 11 state, the latter if the interrupt
         * is pending. This will allow us to unmask or retrigger it
         * after routing it to the guest with a simple EOI.
         *
         * The "state" argument is a "token", all it needs is to be
         * non-NULL to switch to passed-through or NULL for the
         * other way around. We may not yet have an actual VCPU
         * target here and we don't really care.
         */
        rc = irq_set_vcpu_affinity(host_irq, state);
        if (rc) {
                pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
                return rc;
        }

        /*
         * Mask and read state of IPI. We need to know if its P bit
         * is set as that means it's potentially already using a
         * queue entry in the target
         */
        prio = xive_lock_and_mask(xive, sb, state);
        pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
                 state->old_p, state->old_q);

        /* Turn the IPI hard off */
        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

        /* Grab info about irq */
        state->pt_number = hw_irq;
        state->pt_data = irq_data_get_irq_handler_data(host_data);

        /*
         * Configure the IRQ to match the existing configuration of
         * the IPI if it was already targeted. Otherwise this will
         * mask the interrupt in a lossy way (act_priority is 0xff)
         * which is fine for a never started interrupt.
         */
        xive_native_configure_irq(hw_irq,
                                  xive->vp_base + state->act_server,
                                  state->act_priority, state->number);

        /*
         * We do an EOI to enable the interrupt (and retrigger if needed)
         * if the guest has the interrupt unmasked and the P bit was *not*
         * set in the IPI. If it was set, we know a slot may still be in
         * use in the target queue thus we have to wait for a guest
         * originated EOI
         */
        if (prio != MASKED && !state->old_p)
                xive_vm_source_eoi(hw_irq, state->pt_data);

        /* Clear old_p/old_q as they are no longer relevant */
        state->old_p = state->old_q = false;

        /* Restore guest prio (unlocks EOI) */
        mb();
        state->guest_priority = prio;
        arch_spin_unlock(&sb->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                           struct irq_desc *host_desc)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        unsigned int host_irq = irq_desc_get_irq(host_desc);
        u16 idx;
        u8 prio;
        int rc;

        if (!xive)
                return -ENODEV;

        pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

        sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
        if (!sb)
                return -EINVAL;
        state = &sb->irq_state[idx];

        /*
         * Mask and read state of IRQ. We need to know if its P bit
         * is set as that means it's potentially already using a
         * queue entry in the target
         */
        prio = xive_lock_and_mask(xive, sb, state);
        pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
                 state->old_p, state->old_q);

        /*
         * If old_p is set, the interrupt is pending, we switch it to
         * PQ=11. This will force a resend in the host so the interrupt
         * isn't lost to whatever host driver may pick it up
         */
        if (state->old_p)
                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

        /* Release the passed-through interrupt to the host */
        rc = irq_set_vcpu_affinity(host_irq, NULL);
        if (rc) {
                pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
                return rc;
        }

        /* Forget about the IRQ */
        state->pt_number = 0;
        state->pt_data = NULL;

        /* Reconfigure the IPI */
        xive_native_configure_irq(state->ipi_number,
                                  xive->vp_base + state->act_server,
                                  state->act_priority, state->number);

        /*
         * If old_p is set (we have a queue entry potentially
         * occupied) or the interrupt is masked, we set the IPI
         * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
         */
        if (prio == MASKED || state->old_p)
                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
        else
                xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

        /* Restore guest prio (unlocks EOI) */
        mb();
        state->guest_priority = prio;
        arch_spin_unlock(&sb->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_xive *xive = kvm->arch.xive;
        int i, j;

        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
                        struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

                        if (!state->valid)
                                continue;
                        if (state->act_priority == MASKED)
                                continue;
                        if (state->act_server != xc->server_num)
                                continue;

                        /* Clean it up */
                        arch_spin_lock(&sb->lock);
                        state->act_priority = MASKED;
                        xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
                        xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
                        if (state->pt_number) {
                                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
                                xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
                        }
                        arch_spin_unlock(&sb->lock);
                }
        }
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
        struct kvmppc_xive *xive = xc->xive;
        int i;

        pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

        /* Ensure no interrupt is still routed to that VP */
        xc->valid = false;
        kvmppc_xive_disable_vcpu_interrupts(vcpu);

        /* Mask the VP IPI */
        xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

        /* Disable the VP */
        xive_native_disable_vp(xc->vp_id);

        /* Free the queues & associated interrupts */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                struct xive_q *q = &xc->queues[i];

                /* Free the escalation irq */
                if (xc->esc_virq[i]) {
                        free_irq(xc->esc_virq[i], vcpu);
                        irq_dispose_mapping(xc->esc_virq[i]);
                        kfree(xc->esc_virq_names[i]);
                }
                /* Free the queue */
                xive_native_disable_queue(xc->vp_id, q, i);
                if (q->qpage) {
                        free_pages((unsigned long)q->qpage,
                                   xive->q_page_order);
                        q->qpage = NULL;
                }
        }

        /* Free the IPI */
        if (xc->vp_ipi) {
                xive_cleanup_irq_data(&xc->vp_ipi_data);
                xive_native_free_irq(xc->vp_ipi);
        }
        /* Free the VP */
        kfree(xc);
}
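
/*
 * Note (editorial) on the teardown ordering above: the vCPU is first
 * flagged invalid and detached from every source so nothing new can
 * be routed to the VP, then the escalations and queue pages are
 * freed, and only then are the IPI and the structure itself
 * released, so no interrupt can land on freed memory.
 */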

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                             struct kvm_vcpu *vcpu, u32 cpu)
{
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc;
        int i, r = -EBUSY;

        pr_devel("connect_vcpu(cpu=%d)\n", cpu);

        if (dev->ops != &kvm_xive_ops) {
                pr_devel("Wrong ops !\n");
                return -EPERM;
        }
        if (xive->kvm != vcpu->kvm)
                return -EPERM;
        if (vcpu->arch.irq_type)
                return -EBUSY;
        if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
                pr_devel("Duplicate !\n");
                return -EEXIST;
        }
        if (cpu >= KVM_MAX_VCPUS) {
                pr_devel("Out of bounds !\n");
                return -EINVAL;
        }
        xc = kzalloc(sizeof(*xc), GFP_KERNEL);
        if (!xc)
                return -ENOMEM;

        /* We need to synchronize with queue provisioning */
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
        xc->vp_id = xive->vp_base + cpu;
        xc->mfrr = 0xff;
        xc->valid = true;

        r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
        if (r)
                goto bail;

        /* Configure VCPU fields for use by assembly push/pull */
        vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
        vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

        /* Allocate IPI */
        xc->vp_ipi = xive_native_alloc_irq();
        if (!xc->vp_ipi) {
                r = -EIO;
                goto bail;
        }
        pr_devel(" IPI=0x%x\n", xc->vp_ipi);

        r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
        if (r)
                goto bail;

        /*
         * Initialize queues. Initially we set them all for no queueing
         * and we enable escalation for queue 0 only which we'll use for
         * our mfrr change notifications. If the VCPU is hot-plugged, we
         * do handle provisioning however.
         */
        for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
                struct xive_q *q = &xc->queues[i];

                /* Is queue already enabled ? Provision it */
                if (xive->qmap & (1 << i)) {
                        r = xive_provision_queue(vcpu, i);
                        if (r == 0)
                                xive_attach_escalation(vcpu, i);
                        if (r)
                                goto bail;
                } else {
                        r = xive_native_configure_queue(xc->vp_id,
                                                        q, i, NULL, 0, true);
                        if (r) {
                                pr_err("Failed to configure queue %d for VCPU %d\n",
                                       i, cpu);
                                goto bail;
                        }
                }
        }

        /* If not done above, attach priority 0 escalation */
        r = xive_attach_escalation(vcpu, 0);
        if (r)
                goto bail;

        /* Enable the VP */
        r = xive_native_enable_vp(xc->vp_id);
        if (r)
                goto bail;

        /* Route the IPI */
        r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
        if (!r)
                xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
        mutex_unlock(&vcpu->kvm->lock);
        if (r) {
                kvmppc_xive_cleanup_vcpu(vcpu);
                return r;
        }

        vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
        return 0;
}

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return;

        state = &sb->irq_state[idx];

        /* Some sanity checking */
        if (!state->valid) {
                pr_err("invalid irq 0x%x in cpu queue!\n", irq);
                return;
        }

        /*
         * If the interrupt is in a queue it should have P set.
         * We warn so that it gets reported. A backtrace isn't useful
         * so no need to use a WARN_ON.
         */
        if (!state->saved_p)
                pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

        /* Set flag */
        state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
                                   struct kvmppc_xive_src_block *sb,
                                   u32 irq)
{
        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

        if (!state->valid)
                return;

        /* Mask and save state, this will also sync HW queues */
        state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

        /* Transfer P and Q */
        state->saved_p = state->old_p;
        state->saved_q = state->old_q;

        /* Unlock */
        arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
                                     struct kvmppc_xive_src_block *sb,
                                     u32 irq)
{
        struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

        if (!state->valid)
                return;

        /*
         * Lock / exclude EOI (not technically necessary if the
         * guest isn't running concurrently). If this becomes a
         * performance issue we can probably remove the lock.
         */
        xive_lock_for_unmask(sb, state);

        /* Restore mask/prio if it wasn't masked */
        if (state->saved_scan_prio != MASKED)
                xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

        /* Unlock */
        arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
        u32 idx = q->idx;
        u32 toggle = q->toggle;
        u32 irq;

        do {
                irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
                if (irq > XICS_IPI)
                        xive_pre_save_set_queued(xive, irq);
        } while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
        struct kvm_vcpu *vcpu = NULL;
        int i, j;

        /*
         * See comment in xive_get_source() about how this
         * works. Collect a stable state for all interrupts
         */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        xive_pre_save_mask_irq(xive, sb, j);
        }

        /* Then scan the queues and update the "in_queue" flag */
        kvm_for_each_vcpu(i, vcpu, xive->kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
                if (!xc)
                        continue;
                for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
                        if (xc->queues[j].qpage)
                                xive_pre_save_queue(xive, &xc->queues[j]);
                }
        }

        /* Finally restore interrupt states */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        xive_pre_save_unmask_irq(xive, sb, j);
        }
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
        u32 i, j;

        /* Clear all the in_queue flags */
        for (i = 0; i <= xive->max_sbid; i++) {
                struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
                if (!sb)
                        continue;
                for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
                        sb->irq_state[j].in_queue = false;
        }

        /* Next get_source() will do a new scan */
        xive->saved_src_count = 0;
}
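
/*
 * Note (editorial): saved_src_count ties the scan to userspace's save
 * loop. xive_get_source() below runs xive_pre_save_scan() when the
 * count is 0, bumps it for every source saved, and triggers
 * xive_post_save_scan() once it reaches src_count, so the next save
 * pass starts from a fresh scan.
 */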

/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u64 __user *ubufp = (u64 __user *) addr;
        u64 val, prio;
        u16 idx;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -ENOENT;

        state = &sb->irq_state[idx];

        if (!state->valid)
                return -ENOENT;

        pr_devel("get_source(%ld)...\n", irq);

        /*
         * So to properly save the state into something that looks like a
         * XICS migration stream we cannot treat interrupts individually.
         *
         * We need, instead, to mask them all (and save their previous PQ
         * state) to get a stable state in the HW, then sync them to ensure
         * that any interrupt that had already fired hits its queue, and
         * finally scan all the queues to collect which interrupts are
         * still present in the queues, so we can set the "pending" flag
         * on them and they can be resent on restore.
         *
         * So we do it all when the "first" interrupt gets saved, all the
         * state is collected at that point, the rest of xive_get_source()
         * will merely collect and convert that state to the expected
         * userspace bit mask.
         */
        if (xive->saved_src_count == 0)
                xive_pre_save_scan(xive);
        xive->saved_src_count++;

        /* Convert saved state into something compatible with xics */
        val = state->act_server;
        prio = state->saved_scan_prio;

        if (prio == MASKED) {
                val |= KVM_XICS_MASKED;
                prio = state->saved_priority;
        }
        val |= prio << KVM_XICS_PRIORITY_SHIFT;
        if (state->lsi) {
                val |= KVM_XICS_LEVEL_SENSITIVE;
                if (state->saved_p)
                        val |= KVM_XICS_PENDING;
        } else {
                if (state->saved_p)
                        val |= KVM_XICS_PRESENTED;

                if (state->saved_q)
                        val |= KVM_XICS_QUEUED;

                /*
                 * We mark it pending (which will attempt a re-delivery)
                 * if we are in a queue *or* we were masked and had
                 * Q set which is equivalent to the XICS "masked pending"
                 * state
                 */
                if (state->in_queue || (prio == MASKED && state->saved_q))
                        val |= KVM_XICS_PENDING;
        }

        /*
         * If that was the last interrupt saved, reset the
         * in_queue flags
         */
        if (xive->saved_src_count == xive->src_count)
                xive_post_save_scan(xive);

        /* Copy the result to userspace */
        if (put_user(val, ubufp))
                return -EFAULT;

        return 0;
}
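
/*
 * Encoding sketch (editorial; the KVM_XICS_* definitions in the uapi
 * header are authoritative): the low bits of val carry the
 * destination server, the priority sits at KVM_XICS_PRIORITY_SHIFT
 * above it, and MASKED/LEVEL_SENSITIVE/PENDING/PRESENTED/QUEUED are
 * individual flag bits above that, matching what xive_set_source()
 * parses on restore.
 */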

static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
                                                           int irq)
{
        struct kvm *kvm = xive->kvm;
        struct kvmppc_xive_src_block *sb;
        int i, bid;

        bid = irq >> KVMPPC_XICS_ICS_SHIFT;

        mutex_lock(&kvm->lock);

        /* block already exists - somebody else got here first */
        if (xive->src_blocks[bid])
                goto out;

        /* Create the ICS */
        sb = kzalloc(sizeof(*sb), GFP_KERNEL);
        if (!sb)
                goto out;

        sb->id = bid;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
                sb->irq_state[i].guest_priority = MASKED;
                sb->irq_state[i].saved_priority = MASKED;
                sb->irq_state[i].act_priority = MASKED;
        }
        smp_wmb();
        xive->src_blocks[bid] = sb;

        if (bid > xive->max_sbid)
                xive->max_sbid = bid;

out:
        mutex_unlock(&kvm->lock);
        return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
        struct kvm *kvm = xive->kvm;
        struct kvm_vcpu *vcpu = NULL;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

                if (!xc)
                        continue;

                if (xc->delayed_irq == irq) {
                        xc->delayed_irq = 0;
                        xive->delayed_irqs--;
                        return true;
                }
        }
        return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u64 __user *ubufp = (u64 __user *) addr;
        u16 idx;
        u64 val;
        u8 act_prio, guest_prio;
        u32 server;
        int rc = 0;

        if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
                return -ENOENT;

        pr_devel("set_source(irq=0x%lx)\n", irq);

        /* Find the source */
        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb) {
                pr_devel("No source, creating source block...\n");
                sb = xive_create_src_block(xive, irq);
                if (!sb) {
                        pr_devel("Failed to create block...\n");
                        return -ENOMEM;
                }
        }
        state = &sb->irq_state[idx];

        /* Read user passed data */
        if (get_user(val, ubufp)) {
                pr_devel("fault getting user info !\n");
                return -EFAULT;
        }

        server = val & KVM_XICS_DESTINATION_MASK;
        guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

        pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
                 val, server, guest_prio);
        /*
         * If the source doesn't already have an IPI, allocate
         * one and get the corresponding data
         */
        if (!state->ipi_number) {
                state->ipi_number = xive_native_alloc_irq();
                if (state->ipi_number == 0) {
                        pr_devel("Failed to allocate IPI !\n");
                        return -ENOMEM;
                }
                xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
                pr_devel(" src_ipi=0x%x\n", state->ipi_number);
        }

        /*
         * We use lock_and_mask() to set us in the right masked
         * state. We will override that state from the saved state
         * further down, but this will handle the cases of interrupts
         * that need FW masking. We set the initial guest_priority to
         * 0 before calling it to ensure it actually performs the masking.
         */
        state->guest_priority = 0;
        xive_lock_and_mask(xive, sb, state);

        /*
         * Now, we select a target if we have one. If we don't we
         * leave the interrupt untargeted. It means that an interrupt
         * can become "untargeted" across migration if it was masked
         * by set_xive() but there is little we can do about it.
         */

        /* First convert prio and mark interrupt as untargeted */
        act_prio = xive_prio_from_guest(guest_prio);
        state->act_priority = MASKED;

        /*
         * We need to drop the lock due to the mutex below. Hopefully
         * nothing is touching that interrupt yet since it hasn't been
         * advertised to a running guest yet
         */
        arch_spin_unlock(&sb->lock);

        /* If we have a priority target the interrupt */
        if (act_prio != MASKED) {
                /* First, check provisioning of queues */
                mutex_lock(&xive->kvm->lock);
                rc = xive_check_provisioning(xive->kvm, act_prio);
                mutex_unlock(&xive->kvm->lock);

                /* Target interrupt */
                if (rc == 0)
                        rc = xive_target_interrupt(xive->kvm, state,
                                                   server, act_prio);
                /*
                 * If provisioning or targeting failed, leave it
                 * alone and masked. It will remain disabled until
                 * the guest re-targets it.
                 */
        }

        /*
         * Find out if this was a delayed irq stashed in an ICP,
         * in which case, treat it as pending
         */
        if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
                val |= KVM_XICS_PENDING;
                pr_devel(" Found delayed ! forcing PENDING !\n");
        }

        /* Cleanup the SW state */
        state->old_p = false;
        state->old_q = false;
        state->lsi = false;
        state->asserted = false;

        /* Restore LSI state */
        if (val & KVM_XICS_LEVEL_SENSITIVE) {
                state->lsi = true;
                if (val & KVM_XICS_PENDING)
                        state->asserted = true;
                pr_devel(" LSI ! Asserted=%d\n", state->asserted);
        }

        /*
         * Restore P and Q. If the interrupt was pending, we
         * force both P and Q, which will trigger a resend.
         *
         * That means that a guest that had both an interrupt
         * pending (queued) and Q set will restore with only
         * one instance of that interrupt instead of 2, but that
         * is perfectly fine as coalescing interrupts that haven't
         * been presented yet is always allowed.
         */
        if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
                state->old_p = true;
        if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
                state->old_q = true;

        pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

        /*
         * If the interrupt was unmasked, update guest priority and
         * perform the appropriate state transition and do a
         * re-trigger if necessary.
         */
        if (val & KVM_XICS_MASKED) {
                pr_devel(" masked, saving prio\n");
                state->guest_priority = MASKED;
                state->saved_priority = guest_prio;
        } else {
                pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
                xive_finish_unmask(xive, sb, state, guest_prio);
                state->saved_priority = guest_prio;
        }

        /* Increment the number of valid sources and mark this one valid */
        if (!state->valid)
                xive->src_count++;
        state->valid = true;

        return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                        bool line_status)
{
        struct kvmppc_xive *xive = kvm->arch.xive;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        u16 idx;

        if (!xive)
                return -ENODEV;

        sb = kvmppc_xive_find_source(xive, irq, &idx);
        if (!sb)
                return -EINVAL;

        /* Perform locklessly .... (we need to do some RCUisms here...) */
        state = &sb->irq_state[idx];
        if (!state->valid)
                return -EINVAL;

        /* We don't allow a trigger on a passed-through interrupt */
        if (state->pt_number)
                return -EINVAL;

        if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
                state->asserted = 1;
        else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
                state->asserted = 0;
                return 0;
        }

        /* Trigger the IPI */
        xive_irq_trigger(&state->ipi_data);

        return 0;
}
1635 | ||
1636 | static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |
1637 | { | |
1638 | struct kvmppc_xive *xive = dev->private; | |
1639 | ||
1640 | /* We honor the existing XICS ioctl */ | |
1641 | switch (attr->group) { | |
1642 | case KVM_DEV_XICS_GRP_SOURCES: | |
1643 | return xive_set_source(xive, attr->attr, attr->addr); | |
1644 | } | |
1645 | return -ENXIO; | |
1646 | } | |
1647 | ||
1648 | static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |
1649 | { | |
1650 | struct kvmppc_xive *xive = dev->private; | |
1651 | ||
1652 | /* We honor the existing XICS ioctl */ | |
1653 | switch (attr->group) { | |
1654 | case KVM_DEV_XICS_GRP_SOURCES: | |
1655 | return xive_get_source(xive, attr->attr, attr->addr); | |
1656 | } | |
1657 | return -ENXIO; | |
1658 | } | |
1659 | ||
1660 | static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |
1661 | { | |
1662 | /* We honor the same limits as XICS, at least for now */ | |
1663 | switch (attr->group) { | |
1664 | case KVM_DEV_XICS_GRP_SOURCES: | |
1665 | if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && | |
1666 | attr->attr < KVMPPC_XICS_NR_IRQS) | |
1667 | return 0; | |
1668 | break; | |
1669 | } | |
1670 | return -ENXIO; | |
1671 | } | |
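| /* | |
| * A hedged userspace sketch of exercising the three handlers above. | |
| * dev_fd is assumed to come from KVM_CREATE_DEVICE; the u64 behind | |
| * .addr packs server, priority and the KVM_XICS_* flag bits. | |
| * | |
| * __u64 state = KVM_XICS_PRESENTED; // illustrative source state | |
| * struct kvm_device_attr attr = { | |
| * .group = KVM_DEV_XICS_GRP_SOURCES, | |
| * .attr = irq_number, // source number | |
| * .addr = (__u64)&state, | |
| * }; | |
| * ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &attr); // -> xive_set_attr() | |
| */ | |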
1672 | ||
1673 | static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) | |
1674 | { | |
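| /* Mask the source first (PQ = 01) so it cannot fire while we tear it down */ | |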
1675 | xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); | |
1676 | xive_native_configure_irq(hw_num, 0, MASKED, 0); | |
1677 | xive_cleanup_irq_data(xd); | |
1678 | } | |
1679 | ||
1680 | static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) | |
1681 | { | |
1682 | int i; | |
1683 | ||
1684 | for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { | |
1685 | struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; | |
1686 | ||
1687 | if (!state->valid) | |
1688 | continue; | |
1689 | ||
1690 | kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); | |
1691 | xive_native_free_irq(state->ipi_number); | |
1692 | ||
1693 | /* Pass-through, cleanup too */ | |
1694 | if (state->pt_number) | |
1695 | kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); | |
1696 | ||
1697 | state->valid = false; | |
1698 | } | |
1699 | } | |
1700 | ||
1701 | static void kvmppc_xive_free(struct kvm_device *dev) | |
1702 | { | |
1703 | struct kvmppc_xive *xive = dev->private; | |
1704 | struct kvm *kvm = xive->kvm; | |
1705 | int i; | |
1706 | ||
1707 | debugfs_remove(xive->dentry); | |
1708 | ||
1709 | if (kvm) | |
1710 | kvm->arch.xive = NULL; | |
1711 | ||
1712 | /* Mask and free interrupts */ | |
1713 | for (i = 0; i <= xive->max_sbid; i++) { | |
1714 | if (xive->src_blocks[i]) | |
1715 | kvmppc_xive_free_sources(xive->src_blocks[i]); | |
1716 | kfree(xive->src_blocks[i]); | |
1717 | xive->src_blocks[i] = NULL; | |
1718 | } | |
1719 | ||
1720 | if (xive->vp_base != XIVE_INVALID_VP) | |
1721 | xive_native_free_vp_block(xive->vp_base); | |
1722 | ||
1724 | kfree(xive); | |
1725 | kfree(dev); | |
1726 | } | |
1727 | ||
1728 | static int kvmppc_xive_create(struct kvm_device *dev, u32 type) | |
1729 | { | |
1730 | struct kvmppc_xive *xive; | |
1731 | struct kvm *kvm = dev->kvm; | |
1732 | int ret = 0; | |
1733 | ||
1734 | pr_devel("Creating xive for partition\n"); | |
1735 | ||
1736 | xive = kzalloc(sizeof(*xive), GFP_KERNEL); | |
1737 | if (!xive) | |
1738 | return -ENOMEM; | |
1739 | ||
1740 | dev->private = xive; | |
1741 | xive->dev = dev; | |
1742 | xive->kvm = kvm; | |
1743 | ||
1744 | /* Already there? */ | |
1745 | if (kvm->arch.xive) | |
1746 | ret = -EEXIST; | |
1747 | else | |
1748 | kvm->arch.xive = xive; | |
1749 | ||
1750 | /* We use the default queue size set by the host */ | |
1751 | xive->q_order = xive_native_default_eq_shift(); | |
1752 | if (xive->q_order < PAGE_SHIFT) | |
1753 | xive->q_page_order = 0; | |
1754 | else | |
1755 | xive->q_page_order = xive->q_order - PAGE_SHIFT; | |
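| /* | |
| * Worked example (illustrative values): with 4K pages (PAGE_SHIFT = 12) | |
| * a default EQ shift of 16 yields q_page_order = 4, i.e. a 64K queue; | |
| * with 64K pages the same queue fits in a single page (q_page_order = 0). | |
| */ | |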
1756 | ||
1757 | /* Allocate a block of VPs, one per possible vCPU */ | |
1758 | xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS); | |
1759 | pr_devel("VP_Base=%x\n", xive->vp_base); | |
1760 | ||
1761 | if (xive->vp_base == XIVE_INVALID_VP) | |
1762 | ret = -ENOMEM; | |
1763 | ||
1764 | if (ret) { | |
| /* Don't leave a stale pointer behind on failure */ | |
| if (kvm->arch.xive == xive) | |
| kvm->arch.xive = NULL; | |
1765 | kfree(xive); | |
1766 | return ret; | |
1767 | } | |
1768 | ||
1769 | return 0; | |
1770 | } | |
1771 | ||
1772 | ||
1773 | static int xive_debug_show(struct seq_file *m, void *private) | |
1774 | { | |
1775 | struct kvmppc_xive *xive = m->private; | |
1776 | struct kvm *kvm = xive->kvm; | |
1777 | struct kvm_vcpu *vcpu; | |
1778 | u64 t_rm_h_xirr = 0; | |
1779 | u64 t_rm_h_ipoll = 0; | |
1780 | u64 t_rm_h_cppr = 0; | |
1781 | u64 t_rm_h_eoi = 0; | |
1782 | u64 t_rm_h_ipi = 0; | |
1783 | u64 t_vm_h_xirr = 0; | |
1784 | u64 t_vm_h_ipoll = 0; | |
1785 | u64 t_vm_h_cppr = 0; | |
1786 | u64 t_vm_h_eoi = 0; | |
1787 | u64 t_vm_h_ipi = 0; | |
1788 | unsigned int i; | |
1789 | ||
1790 | if (!kvm) | |
1791 | return 0; | |
1792 | ||
1793 | seq_printf(m, "=========\nVCPU state\n=========\n"); | |
1794 | ||
1795 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
1796 | struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; | |
1797 | ||
1798 | if (!xc) | |
1799 | continue; | |
1800 | ||
1801 | seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x" | |
1802 | " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n", | |
1803 | xc->server_num, xc->cppr, xc->hw_cppr, | |
1804 | xc->mfrr, xc->pending, | |
1805 | xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); | |
1806 | ||
1807 | t_rm_h_xirr += xc->stat_rm_h_xirr; | |
1808 | t_rm_h_ipoll += xc->stat_rm_h_ipoll; | |
1809 | t_rm_h_cppr += xc->stat_rm_h_cppr; | |
1810 | t_rm_h_eoi += xc->stat_rm_h_eoi; | |
1811 | t_rm_h_ipi += xc->stat_rm_h_ipi; | |
1812 | t_vm_h_xirr += xc->stat_vm_h_xirr; | |
1813 | t_vm_h_ipoll += xc->stat_vm_h_ipoll; | |
1814 | t_vm_h_cppr += xc->stat_vm_h_cppr; | |
1815 | t_vm_h_eoi += xc->stat_vm_h_eoi; | |
1816 | t_vm_h_ipi += xc->stat_vm_h_ipi; | |
1817 | } | |
1818 | ||
1819 | seq_printf(m, "Hcalls totals\n"); | |
1820 | seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr); | |
1821 | seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll); | |
1822 | seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr); | |
1823 | seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi); | |
1824 | seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi); | |
1825 | ||
1826 | return 0; | |
1827 | } | |
1828 | ||
1829 | static int xive_debug_open(struct inode *inode, struct file *file) | |
1830 | { | |
1831 | return single_open(file, xive_debug_show, inode->i_private); | |
1832 | } | |
1833 | ||
1834 | static const struct file_operations xive_debug_fops = { | |
1835 | .open = xive_debug_open, | |
1836 | .read = seq_read, | |
1837 | .llseek = seq_lseek, | |
1838 | .release = single_release, | |
1839 | }; | |
1840 | ||
1841 | static void xive_debugfs_init(struct kvmppc_xive *xive) | |
1842 | { | |
1843 | char *name; | |
1844 | ||
1845 | name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive); | |
1846 | if (!name) { | |
1847 | pr_err("%s: no memory for name\n", __func__); | |
1848 | return; | |
1849 | } | |
1850 | ||
1851 | xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root, | |
1852 | xive, &xive_debug_fops); | |
1853 | ||
1854 | pr_debug("%s: created %s\n", __func__, name); | |
1855 | kfree(name); | |
1856 | } | |
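| /* | |
| * Assuming debugfs is mounted in the usual place, the file created | |
| * above typically appears as /sys/kernel/debug/powerpc/kvm-xive-<addr>; | |
| * reading it triggers xive_debug_show() and dumps the per-vCPU state | |
| * and hcall counts. | |
| */ | |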
1857 | ||
1858 | static void kvmppc_xive_init(struct kvm_device *dev) | |
1859 | { | |
1860 | struct kvmppc_xive *xive = dev->private; | |
1861 | ||
1862 | /* Register some debug interfaces */ | |
1863 | xive_debugfs_init(xive); | |
1864 | } | |
1865 | ||
1866 | struct kvm_device_ops kvm_xive_ops = { | |
1867 | .name = "kvm-xive", | |
1868 | .create = kvmppc_xive_create, | |
1869 | .init = kvmppc_xive_init, | |
1870 | .destroy = kvmppc_xive_free, | |
1871 | .set_attr = xive_set_attr, | |
1872 | .get_attr = xive_get_attr, | |
1873 | .has_attr = xive_has_attr, | |
1874 | }; | |
1875 | ||
1876 | void kvmppc_xive_init_module(void) | |
1877 | { | |
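| /* | |
| * Point the dispatch hooks at the virtual-mode hcall handlers | |
| * generated from book3s_xive_template.c, for use when hcalls | |
| * cannot be completed in real mode. | |
| */ | |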
1878 | __xive_vm_h_xirr = xive_vm_h_xirr; | |
1879 | __xive_vm_h_ipoll = xive_vm_h_ipoll; | |
1880 | __xive_vm_h_ipi = xive_vm_h_ipi; | |
1881 | __xive_vm_h_cppr = xive_vm_h_cppr; | |
1882 | __xive_vm_h_eoi = xive_vm_h_eoi; | |
1883 | } | |
1884 | ||
1885 | void kvmppc_xive_exit_module(void) | |
1886 | { | |
1887 | __xive_vm_h_xirr = NULL; | |
1888 | __xive_vm_h_ipoll = NULL; | |
1889 | __xive_vm_h_ipi = NULL; | |
1890 | __xive_vm_h_cppr = NULL; | |
1891 | __xive_vm_h_eoi = NULL; | |
1892 | } |