]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - include/linux/msi.h
KVM: arm64: Fix PMU probe ordering
[mirror_ubuntu-jammy-kernel.git] / include / linux / msi.h
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
3b7d1921
EB
2#ifndef LINUX_MSI_H
3#define LINUX_MSI_H
4
b50cac55 5#include <linux/kobject.h>
4aa9bc95 6#include <linux/list.h>
8073c1ac
TG
7#include <asm/msi.h>
8
/*
 * Dummy shadow structures if an architecture does not define them.
 *
 * Architectures can provide their own bitfield layouts for the MSI
 * message words (via <asm/msi.h>); these packed one-member fallbacks
 * keep the unions in struct msi_msg well-formed everywhere else.
 */
#ifndef arch_msi_msg_addr_lo
typedef struct arch_msi_msg_addr_lo {
	u32	address_lo;	/* mirrors msi_msg.address_lo */
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif

#ifndef arch_msi_msg_addr_hi
typedef struct arch_msi_msg_addr_hi {
	u32	address_hi;	/* mirrors msi_msg.address_hi */
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif

#ifndef arch_msi_msg_data
typedef struct arch_msi_msg_data {
	u32	data;		/* mirrors msi_msg.data */
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif
4aa9bc95 27
8073c1ac
TG
/**
 * msi_msg - Representation of a MSI message
 * @address_lo:		Low 32 bits of msi message address
 * @arch_addr_lo:	Architecture specific shadow of @address_lo
 * @address_hi:		High 32 bits of msi message address
 *			(only used when device supports it)
 * @arch_addr_hi:	Architecture specific shadow of @address_hi
 * @data:		MSI message data (usually 16 bits)
 * @arch_data:		Architecture specific shadow of @data
 *
 * Each word is a union so generic code can treat the message as three
 * plain u32s while architecture code may access the same storage
 * through its own packed bitfield view.
 */
struct msi_msg {
	union {
		u32			address_lo;
		arch_msi_msg_addr_lo_t	arch_addr_lo;
	};
	union {
		u32			address_hi;
		arch_msi_msg_addr_hi_t	arch_addr_hi;
	};
	union {
		u32			data;
		arch_msi_msg_data_t	arch_data;
	};
};
52
/*
 * NOTE(review): non-zero appears to make the PCI core skip MSI[-X]
 * mask bit manipulation — confirm against drivers/pci/msi.c users.
 */
extern int pci_msi_ignore_mask;

/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;

/* Copy the cached MSI message of @entry into @msg */
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

#ifdef CONFIG_GENERIC_MSI_IRQ
/* Look up the msi_desc for @irq and copy its cached message into @msg */
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
/* Stub: without generic MSI support @msg is left untouched */
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif
891d4a48 67
c09fcc4b
MZ
/* Callback invoked to program an MSI message into a device */
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};
80
550308e4
GR
/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};
88
49b32315
LV
/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index:		TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};
96
fc88419c
JL
/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_cookie: Optional IOMMU token stashed via
 *		msi_desc_set_iommu_cookie() (CONFIG_IRQ_MSI_IOMMU only)
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @msi_mask:	[PCI MSI]   MSI cached mask bits
 * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] True for a virtual entry (presumably one beyond
 *		the hardware MSI-X table — TODO confirm in drivers/pci/msi.c)
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform]  Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
 * @inta:	[INTA]	    TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	union {
		/* PCI MSI/X specific data */
		struct {
			/* Cached copy of the device's mask/control state */
			union {
				u32 msi_mask;
				u32 msix_ctrl;
			};
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u8	is_virtual	: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			/* Mask register location: offset (MSI) or MMIO (MSI-X) */
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};
175
/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
/* First descriptor on a device's MSI list */
#define first_msi_entry(dev)	\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
/* Iterate over every descriptor of a device */
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
/* As above, but safe against removal of the current entry */
#define for_each_msi_entry_safe(desc, tmp, dev)	\
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
/*
 * Iterate over every Linux irq number of every descriptor which has
 * an irq assigned; multi-vector descriptors expand to a contiguous
 * [irq, irq + nvec_used) range.
 */
#define for_each_msi_vector(desc, __irq, dev)				\
	for_each_msi_entry((desc), (dev))				\
		if ((desc)->irq)					\
			for (__irq = (desc)->irq;			\
			     __irq < ((desc)->irq + (desc)->nvec_used);	\
			     __irq++)
aaebdf8d
JG
#ifdef CONFIG_IRQ_MSI_IOMMU
/* Return the opaque IOMMU token previously stored on @desc */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

/* Store an opaque IOMMU token on @desc; interpretation is up to the caller */
static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
/* Stubs: without CONFIG_IRQ_MSI_IOMMU there is no cookie storage */
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif
214
d31eb342
JL
#ifdef CONFIG_PCI_MSI
/* PCI flavoured wrappers around the generic per-device list helpers */
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

/* Map a descriptor back to its owning pci_dev */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
/* Return the sysdata of the descriptor's PCI bus */
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
/* Compose and write the MSI message for Linux irq number @irq */
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */

/*
 * Allocate an initialized msi_desc for @dev covering @nvec vectors;
 * @affinity may be NULL. Release with free_msi_entry().
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
/* Read back / program the raw MSI message of a descriptor */
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

/* irq_chip mask/unmask callbacks for PCI MSI[-X] */
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

/* Create / tear down the per-device MSI sysfs attribute groups */
const struct attribute_group **msi_populate_sysfs(struct device *dev);
void msi_destroy_sysfs(struct device *dev,
		       const struct attribute_group **msi_irq_groups);

/*
 * The arch hooks to set up msi irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks can only be enabled by the architecture.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
#else
/* Reaching these without the fallback config is a wiring bug — warn once */
static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}

static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
#endif

/*
 * The restore hooks are still available as they are useful even
 * for fully irq domain based setups. Courtesy to XEN/X86.
 */
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>

/* Forward declarations — only pointers to these are used below */
struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free a MSI interrupts
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 *
 * @domain_alloc_irqs, @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XENs separate MSI universe which can't
 * be wrapped into the regular irq domains concepts by mere mortals. This
 * allows to universally use msi_domain_alloc/free_irqs without having to
 * special case XEN all over the place.
 *
 * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs
 * are set to the default implementation if NULL and even when
 * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and
 * because these callbacks are obviously mandatory.
 *
 * This is NOT meant to be abused, but it can be useful to build wrappers
 * for specialized MSI irq domains which need extra work before and after
 * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					    struct device *dev);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

aeeb5965
JL
/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};

/* irq_set_affinity() callback suitable for MSI based irq chips */
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

/* Build an MSI irq domain on top of @parent, described by @info */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
/*
 * Allocate/free @nvec interrupts for @dev in @domain. The __ variants
 * are the default implementations; the plain ones honour the optional
 * domain_alloc_irqs/domain_free_irqs overrides in msi_domain_ops.
 */
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
/* Retrieve the msi_domain_info a domain was created with */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

/* Platform (non-PCI) MSI domain creation and irq allocation */
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

/* Convenience wrappers selecting a linear (false) or tree (true) revmap */
#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/* irq_chip write_msg callback for PCI MSI irq domains */
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
/* Build a PCI-MSI irq domain on top of @parent */
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
/* msi_check implementation validating PCI MSI/MSI-X capability flags */
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
/* Requester id of @pdev as seen by @domain (after DMA alias translation) */
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
#else
/* Stub: no per-device MSI domain without CONFIG_PCI_MSI_IRQ_DOMAIN */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

3b7d1921 467#endif /* LINUX_MSI_H */