// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"


static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;

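/*
 * Query OPAL for an interrupt's ESB (Event State Buffer) geometry and
 * flags, then map its EOI MMIO page (and the trigger page when it is
 * distinct from the EOI page).
 */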
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

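	/* OPAL can return OPAL_BUSY transiently; retry with a delay */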
	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);


/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

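	/*
	 * Queue entries are 4 bytes, so a page of 2^order bytes holds
	 * 2^(order - 2) of them; hence the index mask computed below.
	 */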
	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

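/*
 * Canonical usage of xive_native_configure_queue(): allocate a queue
 * page of the default size, then configure it for this CPU's hardware
 * thread ID at the given priority.
 */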
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq(void)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (!xc->hw_ipi)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = 0;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
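	/* The NSR byte is bits 8-15 of the ack; HE is its top two bits */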
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch(he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI: /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.eoi			= xive_native_eoi,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

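	/* Each device-tree entry is a 4-byte (u32) chip ID */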
	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

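/*
 * Pool VPs are pushed into the HV pool context of each CPU at setup
 * time (see xive_native_setup_cpu()); KVM relies on them, hence the
 * warning below when the allocation fails.
 */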
static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found!\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

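	/* Prefer an EQ size matching the kernel page size when offered */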
	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation? */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

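	/* Round max_vcpus up to the next power of two for the block order */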
	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
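	/* The CAM line value is carried in the low 32 bits of the CAM word */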
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be64 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be64_to_cpu(qsize);
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
		       vp_id, rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);