// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>

#include "xive-internal.h"
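
/*
 * Overview: this backend drives XIVE through hypervisor calls. The
 * H_INT_* hcalls below query and configure interrupt sources and
 * event queues on behalf of the guest; only the TIMA (thread
 * management area) and the per-source ESB pages are accessed via
 * MMIO mappings set up in this file.
 */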

static u32 xive_queue_shift;
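
/*
 * Each xive_irq_bitmap below covers one "ibm,xive-lisn-ranges" range
 * from the device tree and tracks which hardware IRQ numbers (LISNs)
 * in that range are in use. IPIs are allocated from the same ranges.
 */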
struct xive_irq_bitmap {
	unsigned long		*bitmap;
	unsigned int		base;
	unsigned int		count;
	spinlock_t		lock;
	struct list_head	list;
};

static LIST_HEAD(xive_irq_bitmaps);

static int xive_irq_bitmap_add(int base, int count)
{
	struct xive_irq_bitmap *xibm;

	xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
	if (!xibm)
		return -ENOMEM;

	spin_lock_init(&xibm->lock);
	xibm->base = base;
	xibm->count = count;
	xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
	list_add(&xibm->list, &xive_irq_bitmaps);

	pr_info("Using IRQ range [%x-%x]\n", xibm->base,
		xibm->base + xibm->count - 1);
	return 0;
}
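
/*
 * Note: the kzalloc() above sizes the bitmap in bytes (xibm->count
 * bytes) although only xibm->count *bits* are needed, an eightfold
 * over-allocation that errs on the safe side.
 */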

static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
	int irq;

	irq = find_first_zero_bit(xibm->bitmap, xibm->count);
	if (irq != xibm->count) {
		set_bit(irq, xibm->bitmap);
		irq += xibm->base;
	} else {
		irq = -ENOMEM;
	}

	return irq;
}

static int xive_irq_bitmap_alloc(void)
{
	struct xive_irq_bitmap *xibm;
	unsigned long flags;
	int irq = -ENOENT;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		spin_lock_irqsave(&xibm->lock, flags);
		irq = __xive_irq_bitmap_alloc(xibm);
		spin_unlock_irqrestore(&xibm->lock, flags);
		if (irq >= 0)
			break;
	}
	return irq;
}

static void xive_irq_bitmap_free(int irq)
{
	unsigned long flags;
	struct xive_irq_bitmap *xibm;

	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
		if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
			spin_lock_irqsave(&xibm->lock, flags);
			clear_bit(irq - xibm->base, xibm->bitmap);
			spin_unlock_irqrestore(&xibm->lock, flags);
			break;
		}
	}
}

/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
	unsigned int ms = 0;

	if (H_IS_LONG_BUSY(rc)) {
		ms = get_longbusy_msecs(rc);
	} else if (rc == H_BUSY) {
		ms = 10; /* seems appropriate for XIVE hcalls */
	}

	return ms;
}

static unsigned int plpar_busy_delay(int rc)
{
	unsigned int ms;

	ms = plpar_busy_delay_time(rc);
	if (ms)
		mdelay(ms);

	return ms;
}
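
/*
 * Every H_INT_* hcall below is issued with the same retry idiom:
 *
 *	do {
 *		rc = plpar_hcall...(...);
 *	} while (plpar_busy_delay(rc));
 *
 * i.e. keep retrying for as long as the hypervisor answers H_BUSY or
 * an H_LONG_BUSY_* hint, delaying for the hinted number of
 * milliseconds between attempts.
 */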

/*
 * Note: this call has a partition wide scope and can take a while to
 * complete. If it returns H_LONG_BUSY_* it should be retried
 * periodically.
 */
static long plpar_int_reset(unsigned long flags)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_RESET, flags);
	} while (plpar_busy_delay(rc));

	if (rc)
		pr_err("H_INT_RESET failed %ld\n", rc);

	return rc;
}

static long plpar_int_get_source_info(unsigned long flags,
				      unsigned long lisn,
				      unsigned long *src_flags,
				      unsigned long *eoi_page,
				      unsigned long *trig_page,
				      unsigned long *esb_shift)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
		return rc;
	}

	*src_flags = retbuf[0];
	*eoi_page = retbuf[1];
	*trig_page = retbuf[2];
	*esb_shift = retbuf[3];

	pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
		 retbuf[0], retbuf[1], retbuf[2], retbuf[3]);

	return 0;
}

#define XIVE_SRC_SET_EISN	(1ull << (63 - 62))
#define XIVE_SRC_MASK		(1ull << (63 - 63)) /* unused */
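
/*
 * The (1ull << (63 - n)) idiom mirrors the PAPR specification, which
 * numbers bits MSB-first (bit 0 is the most significant bit of the
 * 64-bit doubleword), so "bit 62" in the spec is bit 1 in the usual
 * LSB-first numbering.
 */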

static long plpar_int_set_source_config(unsigned long flags,
					unsigned long lisn,
					unsigned long target,
					unsigned long prio,
					unsigned long sw_irq)
{
	long rc;

	pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
		 flags, lisn, target, prio, sw_irq);

	do {
		rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
					target, prio, sw_irq);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
		       lisn, target, prio, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_get_queue_info(unsigned long flags,
				     unsigned long target,
				     unsigned long priority,
				     unsigned long *esn_page,
				     unsigned long *esn_size)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
				 priority);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
		       target, priority, rc);
		return rc;
	}

	*esn_page = retbuf[0];
	*esn_size = retbuf[1];

	pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
		 retbuf[0], retbuf[1]);

	return 0;
}

#define XIVE_EQ_ALWAYS_NOTIFY	(1ull << (63 - 63))

static long plpar_int_set_queue_config(unsigned long flags,
				       unsigned long target,
				       unsigned long priority,
				       unsigned long qpage,
				       unsigned long qsize)
{
	long rc;

	pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
		 flags, target, priority, qpage, qsize);

	do {
		rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
					priority, qpage, qsize);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
		       target, priority, qpage, rc);
		return rc;
	}

	return 0;
}

static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
		return rc;
	}

	return 0;
}

#define XIVE_ESB_FLAG_STORE	(1ull << (63 - 63))

static long plpar_int_esb(unsigned long flags,
			  unsigned long lisn,
			  unsigned long offset,
			  unsigned long in_data,
			  unsigned long *out_data)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
		 flags, lisn, offset, in_data);

	do {
		rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
				 in_data);
	} while (plpar_busy_delay(rc));

	if (rc) {
		pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
		       lisn, offset, rc);
		return rc;
	}

	*out_data = retbuf[0];

	return 0;
}

static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
	unsigned long read_data;
	long rc;

	rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
			   lisn, offset, data, &read_data);
	if (rc)
		return -1;

	return write ? 0 : read_data;
}
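
/*
 * xive_spapr_esb_rw() is the fallback path the core uses for sources
 * flagged XIVE_SRC_H_INT_ESB below: their ESB pages cannot be
 * accessed by MMIO, so every load or store has to be emulated through
 * the H_INT_ESB hcall instead.
 */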

#define XIVE_SRC_H_INT_ESB	(1ull << (63 - 60))
#define XIVE_SRC_LSI		(1ull << (63 - 61))
#define XIVE_SRC_TRIGGER	(1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI	(1ull << (63 - 63))
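
/*
 * Source flags returned by H_INT_GET_SOURCE_INFO: H_INT_ESB selects
 * hcall-mediated ESB access, LSI marks a level-sensitive source,
 * TRIGGER means the EOI page is a "full function" page that also
 * accepts trigger stores, and STORE_EOI allows EOI by a store rather
 * than a load.
 */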

static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	long rc;
	unsigned long flags;
	unsigned long eoi_page;
	unsigned long trig_page;
	unsigned long esb_shift;

	memset(data, 0, sizeof(*data));

	rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
				       &esb_shift);
	if (rc)
		return -EINVAL;

	if (flags & XIVE_SRC_H_INT_ESB)
		data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
	if (flags & XIVE_SRC_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (flags & XIVE_SRC_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	data->eoi_page = eoi_page;
	data->esb_shift = esb_shift;
	data->trig_page = trig_page;

	/*
	 * No chip-id for the sPAPR backend. This has an impact on how
	 * we pick a target. See xive_pick_irq_target().
	 */
	data->src_chip = XIVE_INVALID_CHIP_ID;

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	/* Full function page supports trigger */
	if (flags & XIVE_SRC_TRIGGER) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
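
/*
 * Each source thus exposes a pair of ESB pages of 2^esb_shift bytes:
 * an EOI page and (unless the EOI page is full-function) a separate
 * trigger page, both mapped once above and cached in xive_irq_data.
 */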

static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	long rc;

	rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
					 prio, sw_irq);

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
				      __be32 *qpage, u32 order)
{
	s64 rc = 0;
	unsigned long esn_page;
	unsigned long esn_size;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else {
		qpage_phys = 0;
	}

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
	if (rc) {
		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
		goto fail;
	}

	/* TODO: add support for the notification page */
	q->eoi_phys = esn_page;

	/* Default is to always notify */
	flags = XIVE_EQ_ALWAYS_NOTIFY;

	/* Configure and enable the queue in HW */
	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
	if (rc) {
		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
		       target, prio);
		rc = -EIO;
	} else {
		q->qpage = qpage;
	}

fail:
	return rc;
}
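
/*
 * Queue sizing: "order" is log2 of the queue size in bytes and each
 * event queue entry is a 32-bit word, so a queue holds 2^(order - 2)
 * entries and q->msk = 2^(order - 2) - 1 wraps the ring index with a
 * simple AND.
 */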

static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
				  u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
					  q, prio, qpage, xive_queue_shift);
}

static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
				     u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	long rc;
	int hw_cpu = get_hard_smp_processor_id(cpu);

	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
	if (rc)
		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
		       hw_cpu, prio);

	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_spapr_match(struct device_node *node)
{
	/* Ignore cascaded controllers for the moment */
	return true;
}

#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	int irq = xive_irq_bitmap_alloc();

	if (irq < 0) {
		pr_err("Failed to allocate IPI on CPU %d\n", cpu);
		return -ENXIO;
	}

	xc->hw_ipi = irq;
	return 0;
}

static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	if (!xc->hw_ipi)
		return;

	xive_irq_bitmap_free(xc->hw_ipi);
	xc->hw_ipi = 0;
}
#endif /* CONFIG_SMP */

static void xive_spapr_shutdown(void)
{
	plpar_int_reset(0);
}

/*
 * Perform an "ack" cycle on the current thread. Grab the pending
 * active priorities and update the CPPR to the most favored one.
 */
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
	u8 nsr, cppr;
	u16 ack;

	/*
	 * Perform the "Acknowledge O/S to Register" cycle.
	 *
	 * Let's speedup the access to the TIMA using the raw I/O
	 * accessor as we don't need the synchronisation routine of
	 * the higher level ones
	 */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "NSR" field which indicates the source
	 * of the interrupt (if any)
	 */
	cppr = ack & 0xff;
	nsr = ack >> 8;

	if (nsr & TM_QW1_NSR_EO) {
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
	}
}

static void xive_spapr_eoi(u32 hw_irq)
{
	/* Not used */;
}

static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Only some debug on the TIMA settings */
	pr_debug("(HW value: %08x %08x %08x)\n",
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
		 in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}

static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */;
}

static void xive_spapr_sync_source(u32 hw_irq)
{
	/* Specs are unclear on what this is doing */
	plpar_int_sync(0, hw_irq);
}

static const struct xive_ops xive_spapr_ops = {
	.populate_irq_data	= xive_spapr_populate_irq_data,
	.configure_irq		= xive_spapr_configure_irq,
	.setup_queue		= xive_spapr_setup_queue,
	.cleanup_queue		= xive_spapr_cleanup_queue,
	.match			= xive_spapr_match,
	.shutdown		= xive_spapr_shutdown,
	.update_pending		= xive_spapr_update_pending,
	.eoi			= xive_spapr_eoi,
	.setup_cpu		= xive_spapr_setup_cpu,
	.teardown_cpu		= xive_spapr_teardown_cpu,
	.sync_source		= xive_spapr_sync_source,
	.esb_rw			= xive_spapr_esb_rw,
#ifdef CONFIG_SMP
	.get_ipi		= xive_spapr_get_ipi,
	.put_ipi		= xive_spapr_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "spapr",
};

/*
 * get max priority from "/ibm,plat-res-int-priorities"
 */
static bool xive_get_max_prio(u8 *max_prio)
{
	struct device_node *rootdn;
	const __be32 *reg;
	u32 len;
	int prio, found;

	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		pr_err("no root node found!\n");
		return false;
	}

	reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
		return false;
	}

	/* HW supports priorities in the range [0-7] and 0xFF is a
	 * wildcard priority used to mask. We scan the ranges reserved
	 * by the hypervisor to find the lowest priority we can use.
	 */
	found = 0xFF;
	for (prio = 0; prio < 8; prio++) {
		int reserved = 0;
		int i;

		for (i = 0; i < len / (2 * sizeof(u32)); i++) {
			int base = be32_to_cpu(reg[2 * i]);
			int range = be32_to_cpu(reg[2 * i + 1]);

			if (prio >= base && prio < base + range)
				reserved++;
		}

		if (!reserved)
			found = prio;
	}

	if (found == 0xFF) {
		pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
		return false;
	}

	*max_prio = found;
	return true;
}
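
/*
 * Example (values purely illustrative): a property holding the single
 * <base range> pair <0x7 0xf9> would reserve priorities 7-255, so the
 * scan above would settle on 6 as the highest usable priority.
 */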

bool __init xive_spapr_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio;
	u32 val;
	u32 len;
	const __be32 *reg;
	int i;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("%s()\n", __func__);
	np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
	if (!np) {
		pr_devel("not found!\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is the OS ring TIMA */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	if (!xive_get_max_prio(&max_prio))
		return false;

	/* Feed the IRQ number allocator with the ranges given in the DT */
	reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
	if (!reg) {
		pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
		return false;
	}

	for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
		xive_irq_bitmap_add(be32_to_cpu(reg[0]),
				    be32_to_cpu(reg[1]));

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}
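
	/*
	 * Note on the loop above: xive_queue_shift ends up being the
	 * last size the hypervisor advertises unless a PAGE_SHIFT-sized
	 * queue (64kB with the usual ppc64 page size) is offered, which
	 * is preferred and stops the scan early.
	 */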

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
		return false;

	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}