/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>
#include <asm/msi.h>

/* Dummy shadow structures if an architecture does not define them */
#ifndef arch_msi_msg_addr_lo
typedef struct arch_msi_msg_addr_lo {
        u32     address_lo;
} __attribute__ ((packed)) arch_msi_msg_addr_lo_t;
#endif

#ifndef arch_msi_msg_addr_hi
typedef struct arch_msi_msg_addr_hi {
        u32     address_hi;
} __attribute__ ((packed)) arch_msi_msg_addr_hi_t;
#endif

#ifndef arch_msi_msg_data
typedef struct arch_msi_msg_data {
        u32     data;
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif

/**
 * struct msi_msg - Representation of an MSI message
 * @address_lo:    Low 32 bits of MSI message address
 * @arch_addr_lo:  Architecture specific shadow of @address_lo
 * @address_hi:    High 32 bits of MSI message address
 *                 (only used when device supports it)
 * @arch_addr_hi:  Architecture specific shadow of @address_hi
 * @data:          MSI message data (usually 16 bits)
 * @arch_data:     Architecture specific shadow of @data
 */
struct msi_msg {
        union {
                u32                     address_lo;
                arch_msi_msg_addr_lo_t  arch_addr_lo;
        };
        union {
                u32                     address_hi;
                arch_msi_msg_addr_hi_t  arch_addr_hi;
        };
        union {
                u32                     data;
                arch_msi_msg_data_t     arch_data;
        };
};

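/*
 * Example: an irq_chip for an MSI-capable interrupt controller typically
 * fills in a struct msi_msg from its irq_compose_msi_msg() callback. This
 * is an illustrative sketch only; MY_DOORBELL_BASE and the data encoding
 * are made-up placeholders, not part of this header:
 *
 *      static void my_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *      {
 *              u64 doorbell = MY_DOORBELL_BASE;        // hypothetical doorbell address
 *
 *              msg->address_hi = upper_32_bits(doorbell);
 *              msg->address_lo = lower_32_bits(doorbell);
 *              msg->data       = d->hwirq;             // device-specific encoding
 *      }
 */
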
extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif

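/*
 * Example: a driver that needs the MSI address/data currently programmed
 * for a Linux interrupt number (e.g. to hand the doorbell to another piece
 * of hardware) can read the cached copy. Illustrative sketch only:
 *
 *      struct msi_msg msg;
 *
 *      get_cached_msi_msg(irq, &msg);
 *      // msg.address_hi/address_lo/data now hold the last message written
 */
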
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
                                    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:      Pointer to platform private data
 * @msi_index:          The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
        struct platform_msi_priv_data   *msi_priv_data;
        u16                             msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:          The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
        u16                             msi_index;
};

/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index:          TISCI device index
 */
struct ti_sci_inta_msi_desc {
        u16     dev_index;
};

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:       List head for management
 * @irq:        The base interrupt number
 * @nvec_used:  The number of vectors used
 * @dev:        Pointer to the device which uses this descriptor
 * @msg:        The last set MSI message cached for reuse
 * @affinity:   Optional pointer to a cpu affinity mask for this descriptor
 *
 * @write_msi_msg:      Callback that may be called when the MSI message
 *                      address or data changes
 * @write_msi_msg_data: Data parameter for the callback.
 *
 * @masked:     [PCI MSI/X] Mask bits
 * @is_msix:    [PCI MSI/X] True if MSI-X
 * @multiple:   [PCI MSI/X] log2 num of messages allocated
 * @multi_cap:  [PCI MSI/X] log2 num of messages supported
 * @maskbit:    [PCI MSI/X] Mask-Pending bit supported?
 * @is_64:      [PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual: [PCI MSI/X] True if the entry is a virtual vector not backed
 *              by a hardware MSI-X table entry
 * @entry_nr:   [PCI MSI/X] Entry which is described by this descriptor
 * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:   [PCI MSI]   Mask register position
 * @mask_base:  [PCI MSI-X] Mask register base address
 * @platform:   [platform]  Platform device specific msi descriptor data
 * @fsl_mc:     [fsl-mc]    FSL MC device specific msi descriptor data
 * @inta:       [INTA]      TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
        /* Shared device/bus type independent data */
        struct list_head                list;
        unsigned int                    irq;
        unsigned int                    nvec_used;
        struct device                   *dev;
        struct msi_msg                  msg;
        struct irq_affinity_desc        *affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
        const void                      *iommu_cookie;
#endif

        void (*write_msi_msg)(struct msi_desc *entry, void *data);
        void *write_msi_msg_data;

        union {
                /* PCI MSI/X specific data */
                struct {
                        u32 masked;
                        struct {
                                u8      is_msix         : 1;
                                u8      multiple        : 3;
                                u8      multi_cap       : 3;
                                u8      maskbit         : 1;
                                u8      is_64           : 1;
                                u8      is_virtual      : 1;
                                u16     entry_nr;
                                unsigned default_irq;
                        } msi_attrib;
                        union {
                                u8      mask_pos;
                                void __iomem *mask_base;
                        };
                };

                /*
                 * Non PCI variants add their data structure here. New
                 * entries need to use a named structure. We want
                 * proper name spaces for this. The PCI part is
                 * anonymous for now as it would require an immediate
                 * tree wide cleanup.
                 */
                struct platform_msi_desc platform;
                struct fsl_mc_msi_desc fsl_mc;
                struct ti_sci_inta_msi_desc inta;
        };
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)           ((desc)->dev)
#define dev_to_msi_list(dev)            (&(dev)->msi_list)
#define first_msi_entry(dev)            \
        list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)   \
        list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev) \
        list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#define for_each_msi_vector(desc, __irq, dev)                           \
        for_each_msi_entry((desc), (dev))                               \
                if ((desc)->irq)                                        \
                        for (__irq = (desc)->irq;                       \
                             __irq < ((desc)->irq + (desc)->nvec_used); \
                             __irq++)

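/*
 * Example: walking the MSI descriptors of a device. The loop below is an
 * illustrative sketch assuming "dev" already has descriptors on its
 * msi_list; my_program_vector() is a placeholder helper, not a real API:
 *
 *      struct msi_desc *desc;
 *      unsigned int virq;
 *
 *      for_each_msi_vector(desc, virq, dev) {
 *              // one iteration per allocated Linux irq of every descriptor
 *              my_program_vector(desc, virq);
 *      }
 */
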
#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
        return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
                                             const void *iommu_cookie)
{
        desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
        return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
                                             const void *iommu_cookie)
{
}
#endif

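/*
 * Example: the IOMMU layer can stash a per-descriptor cookie (e.g. the
 * mapping it created for the MSI doorbell page) and retrieve it later when
 * the message is composed. Hedged sketch, not the canonical dma-iommu code;
 * "my_cookie" is an opaque placeholder:
 *
 *      msi_desc_set_iommu_cookie(desc, my_cookie);
 *      ...
 *      const void *cookie = msi_desc_get_iommu_cookie(desc);
 */
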
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)       first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)      \
        for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
        return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */

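/*
 * Example: iterating the MSI descriptors of a PCI device after the vectors
 * have been set up (e.g. via pci_alloc_irq_vectors()). Illustrative sketch:
 *
 *      struct msi_desc *desc;
 *
 *      for_each_pci_msi_entry(desc, pdev) {
 *              struct pci_dev *pd = msi_desc_to_pci_dev(desc);
 *
 *              dev_info(&pd->dev, "entry %u -> irq %u\n",
 *                       desc->msi_attrib.entry_nr, desc->irq);
 *      }
 */
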
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
                                 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

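/*
 * Example: bus support code (not individual device drivers) allocates the
 * descriptors it hangs off dev->msi_list. A rough sketch of the pattern
 * used by the platform-MSI code, with error handling elided and "index"
 * chosen by the caller:
 *
 *      struct msi_desc *desc;
 *
 *      desc = alloc_msi_entry(dev, 1, NULL);   // one vector, no affinity hint
 *      if (!desc)
 *              return -ENOMEM;
 *      desc->platform.msi_index = index;
 *      list_add_tail(&desc->list, dev_to_msi_list(dev));
 */
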
/*
 * The arch hooks to set up MSI irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks can only be enabled by the architecture.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
#else
static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        WARN_ON_ONCE(1);
        return -ENODEV;
}

static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
{
        WARN_ON_ONCE(1);
}
#endif

/*
 * The restore hooks are still available as they are useful even
 * for fully irq domain based setups. Courtesy to XEN/X86.
 */
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:          Retrieve the resulting hw irq number
 * @msi_init:           Domain specific init function for MSI interrupts
 * @msi_free:           Domain specific function to free an MSI interrupt
 * @msi_check:          Callback for verification of the domain/info/dev data
 * @msi_prepare:        Prepare the allocation of the interrupts in the domain
 * @msi_finish:         Optional callback to finalize the allocation
 * @set_desc:           Set the msi descriptor for an interrupt
 * @handle_error:       Optional error handler if the allocation fails
 * @domain_alloc_irqs:  Optional function to override the default allocation
 *                      function.
 * @domain_free_irqs:   Optional function to override the default free
 *                      function.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 *
 * @domain_alloc_irqs, @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XEN's separate MSI universe which can't
 * be wrapped into the regular irq domain concepts by mere mortals. This
 * allows msi_domain_alloc/free_irqs() to be used universally without having
 * to special-case XEN all over the place.
 *
 * Contrary to other operations, @domain_alloc_irqs and @domain_free_irqs
 * are set to the default implementation if NULL, even when
 * MSI_FLAG_USE_DEF_DOM_OPS is not set, to avoid breaking existing users and
 * because these callbacks are obviously mandatory.
 *
 * This is NOT meant to be abused, but it can be useful to build wrappers
 * for specialized MSI irq domains which need extra work before and after
 * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
 */
struct msi_domain_ops {
        irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
                                     msi_alloc_info_t *arg);
        int             (*msi_init)(struct irq_domain *domain,
                                    struct msi_domain_info *info,
                                    unsigned int virq, irq_hw_number_t hwirq,
                                    msi_alloc_info_t *arg);
        void            (*msi_free)(struct irq_domain *domain,
                                    struct msi_domain_info *info,
                                    unsigned int virq);
        int             (*msi_check)(struct irq_domain *domain,
                                     struct msi_domain_info *info,
                                     struct device *dev);
        int             (*msi_prepare)(struct irq_domain *domain,
                                       struct device *dev, int nvec,
                                       msi_alloc_info_t *arg);
        void            (*msi_finish)(msi_alloc_info_t *arg, int retval);
        void            (*set_desc)(msi_alloc_info_t *arg,
                                    struct msi_desc *desc);
        int             (*handle_error)(struct irq_domain *domain,
                                        struct msi_desc *desc, int error);
        int             (*domain_alloc_irqs)(struct irq_domain *domain,
                                              struct device *dev, int nvec);
        void            (*domain_free_irqs)(struct irq_domain *domain,
                                            struct device *dev);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:              Flags to describe features and capabilities
 * @ops:                The callback data structure
 * @chip:               Optional: associated interrupt chip
 * @chip_data:          Optional: associated interrupt chip data
 * @handler:            Optional: associated interrupt flow handler
 * @handler_data:       Optional: associated interrupt flow handler data
 * @handler_name:       Optional: associated interrupt flow handler name
 * @data:               Optional: domain specific data
 */
struct msi_domain_info {
        u32                     flags;
        struct msi_domain_ops   *ops;
        struct irq_chip         *chip;
        void                    *chip_data;
        irq_flow_handler_t      handler;
        void                    *handler_data;
        const char              *handler_name;
        void                    *data;
};

/* Flags for msi_domain_info */
enum {
        /*
         * Init non-implemented ops callbacks with default MSI domain
         * callbacks.
         */
        MSI_FLAG_USE_DEF_DOM_OPS        = (1 << 0),
        /*
         * Init non-implemented chip callbacks with default MSI chip
         * callbacks.
         */
        MSI_FLAG_USE_DEF_CHIP_OPS       = (1 << 1),
        /* Support multiple PCI MSI interrupts */
        MSI_FLAG_MULTI_PCI_MSI          = (1 << 2),
        /* Support PCI MSI-X interrupts */
        MSI_FLAG_PCI_MSIX               = (1 << 3),
        /* Needs early activate, required for PCI */
        MSI_FLAG_ACTIVATE_EARLY         = (1 << 4),
        /*
         * Must reactivate when irq is started even when
         * MSI_FLAG_ACTIVATE_EARLY has been set.
         */
        MSI_FLAG_MUST_REACTIVATE        = (1 << 5),
        /* Is level-triggered capable, using two messages */
        MSI_FLAG_LEVEL_CAPABLE          = (1 << 6),
};

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
                            bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
                                         struct msi_domain_info *info,
                                         struct irq_domain *parent);
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                            int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
                          int nvec);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

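/*
 * Example: an interrupt controller driver stacks an MSI domain on top of
 * its parent domain, and bus code later allocates vectors from it. A
 * condensed, illustrative sketch; "my_msi_chip", "my_msi_ops" and the
 * fwnode/parent variables are placeholders supplied by the driver:
 *
 *      static struct msi_domain_info my_msi_info = {
 *              .flags  = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *              .ops    = &my_msi_ops,
 *              .chip   = &my_msi_chip,
 *      };
 *
 *      domain = msi_create_irq_domain(fwnode, &my_msi_info, parent);
 *      ...
 *      // later, for a client device with descriptors already queued:
 *      ret = msi_domain_alloc_irqs(domain, dev, nvec);
 */
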
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                                  struct msi_domain_info *info,
                                                  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
                                   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);

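/*
 * Example: a platform device driver whose hardware generates MSIs requests
 * vectors and provides a callback that programs the doorbell registers.
 * Illustrative sketch; my_write_msg() and the register programming are
 * placeholders:
 *
 *      static void my_write_msg(struct msi_desc *desc, struct msi_msg *msg)
 *      {
 *              // program msg->address_hi/address_lo/data into the device
 *      }
 *
 *      ret = platform_msi_domain_alloc_irqs(&pdev->dev, 4, my_write_msg);
 *      if (ret)
 *              return ret;
 *      ...
 *      platform_msi_domain_free_irqs(&pdev->dev);      // on teardown
 */
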
/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
                            int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
                             int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
                                    unsigned int nvec,
                                    bool is_tree,
                                    irq_write_msi_msg_t write_msi_msg,
                                    const struct irq_domain_ops *ops,
                                    void *host_data);

#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
        __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
        __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

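/*
 * Example: drivers for "wired interrupt to MSI" bridges create a
 * device-specific MSI domain and then allocate/free ranges from it with
 * platform_msi_domain_alloc()/platform_msi_domain_free(). Hedged sketch;
 * my_domain_ops, my_write_msg and priv are driver-provided placeholders:
 *
 *      domain = platform_msi_create_device_domain(dev, 256, my_write_msg,
 *                                                 &my_domain_ops, priv);
 *      if (!domain)
 *              return -ENOMEM;
 */
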
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
                              unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
                              unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                             struct msi_domain_info *info,
                                             struct irq_domain *parent);
int pci_msi_domain_check_cap(struct irq_domain *domain,
                             struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
        return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

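/*
 * Example: the PCI/MSI variant is what interrupt controller drivers use to
 * expose an MSI domain to PCI devices. Illustrative sketch; the info,
 * fwnode and parent domain come from the specific driver:
 *
 *      info->flags |= MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI;
 *      pci_domain = pci_msi_create_irq_domain(fwnode, info, parent);
 */
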
#endif /* LINUX_MSI_H */