/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt...)	pr_devel(fmt)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" interrupt should never fire; to catch problems
 * we set its logical number to this.
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set.
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}

	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}
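
/*
 * A note on the toggle scheme above: validity is encoded in bit 31
 * rather than by zeroing consumed slots. An entry whose bit 31 equals
 * q->toggle reads as "no new entry", so flipping the toggle each time
 * idx wraps to 0 invalidates the whole previous generation of entries
 * in one step.
 */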

/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, returns the most favored pending
 * interrupt if any, but doesn't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the number of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch from the queue */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq)
			break;

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}
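
/*
 * Worked example (illustrative values): with pending_prio = 0x24,
 * priorities 2 and 5 are pending. ffs(0x24) - 1 = 2, so queue 2 is
 * drained first; only once it reads empty is bit 2 cleared and queue 5
 * tried. If queue 5 then yields an interrupt, the CPPR is set to 5.
 */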

/*
 * This is used to perform the magic loads from an ESB
 * described in xive.h
 */
static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}
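
/*
 * Summary of how the ESB load offsets are used in this file (see
 * xive.h for the authoritative description of the ESB pages):
 *
 *   XIVE_ESB_GET        read P/Q without side effects (xmon dump)
 *   XIVE_ESB_SET_PQ_00  clear P and Q: EOI / fully enable the source
 *   XIVE_ESB_SET_PQ_01  mask the source, old P is kept in saved_p
 *   XIVE_ESB_SET_PQ_10  block further sends, keep queue occupancy
 *   XIVE_ESB_SET_PQ_11  latch P and Q, used to force a retrigger
 */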

#ifdef CONFIG_XMON
static void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf(" %s Q T=%d %08x %08x ...\n", name,
		    q->toggle, i0, i1);
}

void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("XIVE state for CPU %d:\n", cpu);
	xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
#ifdef CONFIG_SMP
	{
		u64 val = xive_poke_esb(&xc->ipi_data, XIVE_ESB_GET);

		xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
			    val & XIVE_ESB_VAL_P ? 'P' : 'p',
			    val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
	}
#endif
}
#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless soft-masking tells us
	 * that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}

/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type.
 */
void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/*
		 * The FW told us to call it. This happens for some
		 * interrupt sources that need additional HW whacking
		 * beyond the ESB manipulation. For example LPC interrupts
		 * on P9 DD1.0 need a latch to be cleared in the LPC bridge
		 * itself. The Firmware will take care of it.
		 */
		if (WARN_ON_ONCE(!xive_ops->eoi))
			return;
		xive_ops->eoi(hw_irq);
	} else {
		u8 eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work
		 * properly.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI) {
			in_be64(xd->eoi_mmio);
		} else {
			eoi_val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
			DBG_VERBOSE("eoi_val=%x\n", eoi_val);

			/* Re-trigger if needed */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
		}
	}
}

/* irq_chip eoi callback */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest.
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue.
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_01);
		xd->saved_p = !!(val & XIVE_ESB_VAL_P);
	} else if (xd->saved_p) {
		xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
	} else {
		xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
	}
}
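
/*
 * The resulting transitions: mask always drives the ESB to PQ=01;
 * unmask restores PQ=10 when saved_p indicates the interrupt may still
 * occupy a queue slot, and PQ=00 otherwise.
 */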

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	unsigned int max;

	/*
	 * Calculate the max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}
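
/*
 * Sizing note (illustrative numbers): q->msk is the index mask, so
 * q->msk + 1 is the slot count. A 64k queue page of 4-byte entries
 * gives 16384 slots, and the gap of 1 caps the accounting at 16383
 * interrupts for that queue.
 */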

/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead, increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (unlikely(WARN_ON(cpu < 0 || !xc))) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there
	 * occupying a slot.
	 */
	atomic_inc(&q->pending_count);
}
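
/*
 * Example of the two-counter scheme: with count=5, retargeting two
 * interrupts away bumps pending_count to 2 while count stays at 5.
 * Only when xive_scan_interrupts() later observes the queue empty does
 * it fold pending_count back in, leaving count=3. count therefore
 * never drops below the number of entries possibly still in the queue.
 */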

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = cpumask_weight(mask);
	first = fuzz % num;

	/* Locate the first CPU at or after that starting point */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	for (;;) {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask.
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		if (cpu == first)
			break;
		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	}
	return -1;
}
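
/*
 * The "fuzz" parameter makes successive calls start the search at
 * different offsets in the mask, giving a cheap round-robin spread of
 * interrupt targets instead of piling everything on the first CPU.
 */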

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs on the same chip and find a target in there.
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
	}

	/* No chip IDs, fall back to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue.
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * The above may have set saved_p. We clear it otherwise it
	 * will prevent re-enabling later on. It is ok to forget the
	 * fact that the interrupt might be in a queue because we are
	 * accounting that already in xive_dec_target_count() and will
	 * be re-routing it to a new queue with proper accounting when
	 * it's started up again.
	 */
	xd->saved_p = false;

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number.
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9; for
	 * these we call FW to set the mask. The problems might
	 * be fixed by P9 DD2.0, in which case firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9; for
	 * these we call OPAL to set the mask. The problems might
	 * be fixed by P9 DD2.0, in which case firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					0xff, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * If the existing target is already in the new mask and is
	 * online, then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest.
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel("  target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than setting
	 * the corresponding descriptor bits, mind you, but those will in turn
	 * affect the resend function when re-enabling an edge interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks.
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree, so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend.
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should only be for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * Note: We pass "0" to the hw_irq argument in order to
	 * avoid calling into the backend EOI code which we don't
	 * want to do in the case of a re-trigger. Backends typically
	 * only do EOI for LSIs anyway.
	 */
	xive_do_source_eoi(0, xd);

	return 1;
}

static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * We only support this on interrupts that do not require
	 * firmware calls for masking and unmasking.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
		return -EIO;

	/*
	 * This is called by KVM with state non-NULL for enabling
	 * pass-through or NULL for disabling it.
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargeted interrupt should have been
			 * also masked at the source.
			 */
			WARN_ON(pq & 2);
			return 0;
		}

		/*
		 * If P was set, adjust state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. Thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
		if (pq & 2) {
			pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
			xd->saved_p = true;

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * for P to be seen here but the interrupt going
			 * to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		} else {
			xd->saved_p = false;
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? hard mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOId eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(hw_irq, xd);
	}
	return 0;
}

static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}

#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;
	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};

static void __init xive_request_ipi(void)
{
	unsigned int virq;

	/*
	 * Initialization may have failed; move on regardless, we might
	 * manage to reach the point where we display our errors before
	 * the system falls apart.
	 */
	if (!xive_irq_domain)
		return;

	/* Initialize it */
	virq = irq_create_mapping(xive_irq_domain, 0);
	xive_ipi_irq = virq;

	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
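
/*
 * Note: hw number 0 is reserved for this IPI mapping, which is what
 * the hw == 0 special case in xive_irq_domain_map() below relies on.
 */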

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != 0)
		return 0;

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == 0)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap().
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_request_ipi();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
	/* IPIs are special and come up with HW number 0 */
	if (hw == 0) {
		/*
		 * IPIs are marked per-cpu. We use separate HW interrupts under
		 * the hood but associated with the same "linux" interrupt.
		 */
		irq_set_chip_and_handler(virq, &xive_ipi_chip,
					 handle_percpu_irq);
		return 0;
	}
#endif

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_get_irq_data(virq);
	unsigned int hw_irq;

	/* XXX Assign BAD number */
	if (!data)
		return;
	hw_irq = (unsigned int)irqd_to_hwirq(data);
	if (hw_irq)
		xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell;
	 * we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else {
		*out_flags = IRQ_TYPE_LEVEL_LOW;
	}

	return 0;
}
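
/*
 * Illustrative device-tree encoding (hypothetical values): an interrupt
 * specifier of <0x1234 1> maps to hwirq 0x1234, level low, while
 * <0x1234 0> maps to edge rising. A single-cell specifier is assumed
 * to be level low.
 */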

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We set up a single queue for now, with a 64k page */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		struct device_node *np;

		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		np = of_get_cpu_node(cpu, NULL);
		if (np)
			xc->chip_id = of_get_ibm_chip_id(np);
		of_node_put(np);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Debug: Dump the TM state */
	pr_devel("CPU %d [HW 0x%02x] VT=%02x\n",
		 smp_processor_id(), hard_smp_processor_id(),
		 in_8(xive_tima + xive_tima_offset + TM_WORD2));

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}
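
/*
 * CPPR convention used throughout this file: 0xff accepts interrupts
 * at any priority, 0 blocks them all. See xive_smp_disable_cpu() and
 * xive_kexec_teardown_cpu() below for the corresponding disables.
 */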

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor.
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		/*
		 * Ignore anything that isn't a XIVE irq, and ignore
		 * IPIs, which can just be dropped.
		 */
		if (d->domain != xive_irq_domain || hw_irq == 0)
			continue;

		/*
		 * The IRQ should have already been re-routed; it's just a
		 * stale entry in the old queue, so re-trigger it in order
		 * to make it reach its new destination.
		 */
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);

		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * For LSIs we EOI; this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(irqd_to_hwirq(d), xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_kexec_teardown_cpu(int secondary)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Backend cleanup if any */
	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host();

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}

static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);