]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - include/linux/io-mapping.h
Merge tag 'for-linus-urgent' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[mirror_ubuntu-jammy-kernel.git] / include / linux / io-mapping.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright © 2008 Keith Packard <keithp@keithp.com>
4 */
5
6 #ifndef _LINUX_IO_MAPPING_H
7 #define _LINUX_IO_MAPPING_H
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bug.h>
12 #include <linux/io.h>
13 #include <linux/pgtable.h>
14 #include <asm/page.h>
15
16 /*
17 * The io_mapping mechanism provides an abstraction for mapping
18 * individual pages from an io device to the CPU in an efficient fashion.
19 *
20 * See Documentation/driver-api/io-mapping.rst
21 */
22
/*
 * Describes one write-combining mapping of a bus-address range.
 *
 * With CONFIG_HAVE_ATOMIC_IOMAP only @base, @size and @prot are used
 * (pages are mapped on demand via the fixmap); otherwise @iomem holds
 * one persistent ioremap_wc() mapping of the whole range.
 */
struct io_mapping {
	resource_size_t base;	/* bus/physical start of the range */
	unsigned long size;	/* length of the range in bytes */
	pgprot_t prot;		/* protection used for per-page maps */
	void __iomem *iomem;	/* linear map (non-atomic-iomap case) */
};
29
30 #ifdef CONFIG_HAVE_ATOMIC_IOMAP
31
32 #include <linux/pfn.h>
33 #include <asm/iomap.h>
34 /*
35 * For small address space machines, mapping large objects
36 * into the kernel virtual space isn't practical. Where
37 * available, use fixmap support to dynamically map pages
38 * of the object at run time.
39 */
40
41 static inline struct io_mapping *
42 io_mapping_init_wc(struct io_mapping *iomap,
43 resource_size_t base,
44 unsigned long size)
45 {
46 pgprot_t prot;
47
48 if (iomap_create_wc(base, size, &prot))
49 return NULL;
50
51 iomap->base = base;
52 iomap->size = size;
53 iomap->prot = prot;
54 return iomap;
55 }
56
/*
 * Tear down a mapping set up by io_mapping_init_wc(): release the
 * WC resources reserved by iomap_create_wc().
 */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}
62
63 /* Atomic map/unmap */
/*
 * Map one page at @offset into the kernel, write-combined.  Enters an
 * atomic section (preemption and pagefaults disabled) that lasts until
 * io_mapping_unmap_atomic().  @offset must lie within the mapping
 * (BUG otherwise).
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;
	/* Disable preemption and faults before taking the fixmap slot. */
	preempt_disable();
	pagefault_disable();
	return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
76
/*
 * Undo io_mapping_map_atomic_wc(): drop the temporary kernel mapping,
 * then re-enable pagefaults and preemption in the reverse order of
 * the map side.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
	pagefault_enable();
	preempt_enable();
}
84
85 static inline void __iomem *
86 io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
87 {
88 resource_size_t phys_addr;
89
90 BUG_ON(offset >= mapping->size);
91 phys_addr = mapping->base + offset;
92 return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
93 }
94
/* Drop a mapping created by io_mapping_map_local_wc(). */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	kunmap_local_indexed((void __force *)vaddr);
}
99
100 static inline void __iomem *
101 io_mapping_map_wc(struct io_mapping *mapping,
102 unsigned long offset,
103 unsigned long size)
104 {
105 resource_size_t phys_addr;
106
107 BUG_ON(offset >= mapping->size);
108 phys_addr = mapping->base + offset;
109
110 return ioremap_wc(phys_addr, size);
111 }
112
/* Undo io_mapping_map_wc(): release the ioremap_wc() mapping. */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
118
119 #else /* HAVE_ATOMIC_IOMAP */
120
121 #include <linux/uaccess.h>
122
123 /* Create the io_mapping object*/
/*
 * Initialize @iomap to cover [@base, @base + @size) with a single
 * persistent write-combining ioremap.  Also derives the pgprot to use
 * for per-page mappings of the same range.
 * Returns @iomap on success, NULL if the ioremap fails (in which case
 * @base/@size/@prot are left unset).
 */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	iomap->iomem = ioremap_wc(base, size);
	if (!iomap->iomem)
		return NULL;

	iomap->base = base;
	iomap->size = size;
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
#elif defined(pgprot_writecombine)
	iomap->prot = pgprot_writecombine(PAGE_KERNEL);
#else
	/* Last resort: fall back to a plain uncached protection. */
	iomap->prot = pgprot_noncached(PAGE_KERNEL);
#endif

	return iomap;
}
145
/* Tear down io_mapping_init_wc(): drop the persistent ioremap. */
static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}
151
152 /* Non-atomic map/unmap */
/*
 * Return a pointer @offset bytes into the persistent mapping.
 * NOTE(review): @size is unused here, and unlike the atomic-iomap
 * variant @offset is not bounds-checked against mapping->size —
 * callers are trusted to stay in range.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}
160
/*
 * Intentional no-op: io_mapping_map_wc() handed out a pointer into
 * the long-lived ioremap, which stays mapped until io_mapping_fini().
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
165
166 /* Atomic map/unmap */
/*
 * Map one page at @offset, entering an atomic section (preemption and
 * pagefaults disabled) to mirror the fixmap-based implementation's
 * context rules.  Release with io_mapping_unmap_atomic().
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
175
/*
 * Leave the atomic section entered by io_mapping_map_atomic_wc(),
 * re-enabling pagefaults and preemption in reverse order.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}
183
/*
 * Map one page at @offset without entering an atomic section: in this
 * config it is just an offset into the persistent mapping.
 */
static inline void __iomem *
io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
{
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
189
/* Counterpart of io_mapping_map_local_wc(); a no-op in this config. */
static inline void io_mapping_unmap_local(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
}
194
195 #endif /* !HAVE_ATOMIC_IOMAP */
196
197 static inline struct io_mapping *
198 io_mapping_create_wc(resource_size_t base,
199 unsigned long size)
200 {
201 struct io_mapping *iomap;
202
203 iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
204 if (!iomap)
205 return NULL;
206
207 if (!io_mapping_init_wc(iomap, base, size)) {
208 kfree(iomap);
209 return NULL;
210 }
211
212 return iomap;
213 }
214
/*
 * Release an io_mapping obtained from io_mapping_create_wc():
 * tear down the mapping, then free the container itself.
 */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
221
222 #endif /* _LINUX_IO_MAPPING_H */
223
224 int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
225 unsigned long addr, unsigned long pfn, unsigned long size);