Source: arch/powerpc/include/asm/xive.h — Linux kernel header (scraped from the
git.proxmox.com gitweb "blame" view of mirror_ubuntu-kernels.git; commit title:
"treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 152").
2874c5fd 1/* SPDX-License-Identifier: GPL-2.0-or-later */
243e2511
BH
2/*
3 * Copyright 2016,2017 IBM Corporation.
243e2511
BH
4 */
5#ifndef _ASM_POWERPC_XIVE_H
6#define _ASM_POWERPC_XIVE_H
7
8#define XIVE_INVALID_VP 0xffffffff
9
10#ifdef CONFIG_PPC_XIVE
11
12/*
13 * Thread Interrupt Management Area (TIMA)
14 *
15 * This is a global MMIO region divided in 4 pages of varying access
16 * permissions, providing access to per-cpu interrupt management
17 * functions. It always identifies the CPU doing the access based
18 * on the PowerBus initiator ID, thus we always access via the
19 * same offset regardless of where the code is executing
20 */
21extern void __iomem *xive_tima;
39e9af3d 22extern unsigned long xive_tima_os;
243e2511
BH
23
24/*
25 * Offset in the TM area of our current execution level (provided by
26 * the backend)
27 */
28extern u32 xive_tima_offset;
29
30/*
31 * Per-irq data (irq_get_handler_data for normal IRQs), IPIs
32 * have it stored in the xive_cpu structure. We also cache
33 * for normal interrupts the current target CPU.
34 *
35 * This structure is setup by the backend for each interrupt.
36 */
37struct xive_irq_data {
38 u64 flags;
39 u64 eoi_page;
40 void __iomem *eoi_mmio;
41 u64 trig_page;
42 void __iomem *trig_mmio;
43 u32 esb_shift;
44 int src_chip;
c58a14a9 45 u32 hw_irq;
243e2511
BH
46
47 /* Setup/used by frontend */
48 int target;
49 bool saved_p;
50};
51#define XIVE_IRQ_FLAG_STORE_EOI 0x01
52#define XIVE_IRQ_FLAG_LSI 0x02
53#define XIVE_IRQ_FLAG_SHIFT_BUG 0x04
54#define XIVE_IRQ_FLAG_MASK_FW 0x08
55#define XIVE_IRQ_FLAG_EOI_FW 0x10
bed81ee1 56#define XIVE_IRQ_FLAG_H_INT_ESB 0x20
/* Special flag set by KVM for escalation interrupts */
#define XIVE_IRQ_NO_EOI		0x80

#define XIVE_INVALID_CHIP_ID	-1
63/* A queue tracking structure in a CPU */
64struct xive_q {
65 __be32 *qpage;
66 u32 msk;
67 u32 idx;
68 u32 toggle;
69 u64 eoi_phys;
70 u32 esc_irq;
71 atomic_t count;
72 atomic_t pending_count;
13ce3297
CLG
73 u64 guest_qaddr;
74 u32 guest_qshift;
243e2511
BH
75};
76
243e2511
BH
77/* Global enable flags for the XIVE support */
78extern bool __xive_enabled;
79
80static inline bool xive_enabled(void) { return __xive_enabled; }
81
eac1e731 82extern bool xive_spapr_init(void);
243e2511
BH
83extern bool xive_native_init(void);
84extern void xive_smp_probe(void);
85extern int xive_smp_prepare_cpu(unsigned int cpu);
86extern void xive_smp_setup_cpu(void);
87extern void xive_smp_disable_cpu(void);
eac1e731 88extern void xive_teardown_cpu(void);
243e2511
BH
89extern void xive_shutdown(void);
90extern void xive_flush_interrupt(void);
91
92/* xmon hook */
93extern void xmon_xive_do_dump(int cpu);
94
95/* APIs used by KVM */
96extern u32 xive_native_default_eq_shift(void);
97extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
98extern void xive_native_free_vp_block(u32 vp_base);
99extern int xive_native_populate_irq_data(u32 hw_irq,
100 struct xive_irq_data *data);
101extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
102extern u32 xive_native_alloc_irq(void);
103extern void xive_native_free_irq(u32 irq);
104extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
105
106extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
107 __be32 *qpage, u32 order, bool can_escalate);
108extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
109
5af50993 110extern void xive_native_sync_source(u32 hw_irq);
88ec6b93 111extern void xive_native_sync_queue(u32 hw_irq);
243e2511 112extern bool is_xive_irq(struct irq_chip *chip);
bf4159da 113extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
5af50993
BH
114extern int xive_native_disable_vp(u32 vp_id);
115extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
bf4159da 116extern bool xive_native_has_single_escalation(void);
243e2511 117
88ec6b93
CLG
118extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
119 u64 *out_qpage,
120 u64 *out_qsize,
121 u64 *out_qeoi_page,
122 u32 *out_escalate_irq,
123 u64 *out_qflags);
124
125extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
126 u32 *qindex);
127extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
128 u32 qindex);
129extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
130
243e2511
BH
131#else
132
133static inline bool xive_enabled(void) { return false; }
134
eac1e731 135static inline bool xive_spapr_init(void) { return false; }
243e2511
BH
136static inline bool xive_native_init(void) { return false; }
137static inline void xive_smp_probe(void) { }
38833faa 138static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
243e2511
BH
139static inline void xive_smp_setup_cpu(void) { }
140static inline void xive_smp_disable_cpu(void) { }
141static inline void xive_kexec_teardown_cpu(int secondary) { }
142static inline void xive_shutdown(void) { }
143static inline void xive_flush_interrupt(void) { }
144
145static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
146static inline void xive_native_free_vp_block(u32 vp_base) { }
147
148#endif
149
150#endif /* _ASM_POWERPC_XIVE_H */