/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
        phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
        phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)       ((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)       ((xpaddr_t) { .paddr = (x) })
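
/*
 * The wrapper structs above exist purely for type safety. A minimal
 * illustrative sketch (not part of the original header):
 *
 *      xmaddr_t ma = XMADDR(0x100000);    machine address
 *      xpaddr_t pa = XPADDR(0x100000);    pseudo-physical address
 *      ma = pa;    <-- would not compile: xmaddr_t and xpaddr_t differ,
 *                      so the two address spaces cannot be mixed silently
 */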

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY       (~0UL)
#define FOREIGN_FRAME_BIT       (1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT      (1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)        ((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)       ((m) | IDENTITY_FRAME_BIT)
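
/*
 * Illustrative sketch (assumption, not from the original header): the two
 * high bits tag p2m entries in place, so a stored entry can carry both the
 * mfn and its origin:
 *
 *      entry = FOREIGN_FRAME(mfn);    entry for a frame owned elsewhere
 *      mfn   = entry & ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
 *                                     recovers the plain mfn, as
 *                                     pfn_to_mfn() does below
 */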

#define P2M_PER_PAGE            (PAGE_SIZE / sizeof(unsigned long))

extern unsigned long *machine_to_phys_mapping;
extern unsigned long machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long xen_p2m_size;
extern unsigned long xen_max_p2m_pfn;

extern int xen_alloc_p2m_entry(unsigned long pfn);

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                                                    unsigned long pfn_e);

#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                                   struct gnttab_map_grant_ref *kmap_ops,
                                   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                                     struct gnttab_unmap_grant_ref *kunmap_ops,
                                     struct page **pages, unsigned int count);
#else
static inline int
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                        struct gnttab_map_grant_ref *kmap_ops,
                        struct page **pages, unsigned int count)
{
        return 0;
}

static inline int
clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                          struct gnttab_unmap_grant_ref *kunmap_ops,
                          struct page **pages, unsigned int count)
{
        return 0;
}
#endif

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
        return __put_user(val, (unsigned long __user *)addr);
}

static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
{
        return __get_user(*val, (unsigned long __user *)addr);
}

#ifdef CONFIG_XEN_PV
/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the raw p2m table entry: any identity or foreign
 *   indicator bit is left set. __pfn_to_mfn() encapsulates
 *   get_phys_to_machine(), which is called in special cases only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only for the
 *   special cases that need extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn;

        if (pfn < xen_p2m_size)
                mfn = xen_p2m_addr[pfn];
        else if (unlikely(pfn < xen_max_p2m_pfn))
                return get_phys_to_machine(pfn);
        else
                return IDENTITY_FRAME(pfn);

        if (unlikely(mfn == INVALID_P2M_ENTRY))
                return get_phys_to_machine(pfn);

        return mfn;
}
#else
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
        return pfn;
}
#endif
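
/*
 * Illustrative sketch (assumption, not from the original header) of the
 * difference the comment above describes, for a PV guest looking at a frame
 * granted by another domain:
 *
 *      raw = __pfn_to_mfn(pfn);      may still carry FOREIGN_FRAME_BIT
 *      if (raw != INVALID_P2M_ENTRY && (raw & FOREIGN_FRAME_BIT))
 *              ... the frame belongs to another domain ...
 *      mfn = pfn_to_mfn(pfn);        indicator bits already stripped
 */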

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn;

        /*
         * Some x86 code is still using pfn_to_mfn instead of
         * pfn_to_gfn. This will have to be removed once we figure
         * out which call sites need converting.
         */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;

        mfn = __pfn_to_mfn(pfn);

        if (mfn != INVALID_P2M_ENTRY)
                mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

        return mfn;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 1;

        return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
        unsigned long pfn;
        int ret;

        if (unlikely(mfn >= machine_to_phys_nr))
                return ~0;

        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
        ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
        if (ret < 0)
                return ~0;

        return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
        unsigned long pfn;

        /*
         * Some x86 code is still using mfn_to_pfn instead of
         * gfn_to_pfn. This will have to be removed once we figure
         * out which call sites need converting.
         */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        pfn = mfn_to_pfn_no_overrides(mfn);
        if (__pfn_to_mfn(pfn) != mfn)
                pfn = ~0;

        /*
         * pfn is ~0 if there are no entries in the m2p for mfn or the
         * entry doesn't map back to the mfn.
         */
        if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
                pfn = mfn;

        return pfn;
}
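
/*
 * Round-trip sketch (assumption, not from the original header): on a PV
 * guest the two translations are mutually consistent for RAM the guest
 * owns, which is exactly the property mfn_to_pfn() checks above:
 *
 *      mfn = pfn_to_mfn(pfn);
 *      if (mfn != INVALID_P2M_ENTRY)
 *              WARN_ON(mfn_to_pfn(mfn) != pfn);    expected to hold
 */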

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
        unsigned offset = phys.paddr & ~PAGE_MASK;
        return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
        unsigned offset = machine.maddr & ~PAGE_MASK;
        return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
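
/*
 * Worked example (illustrative assumption): only the frame number is
 * translated; the offset within the page is carried over unchanged.
 * With PAGE_SIZE == 4096 and a hypothetical p2m(0x1234) == 0x5678:
 *
 *      phys_to_machine(XPADDR(0x1234abc)).maddr == 0x5678abc
 */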

/* Pseudo-physical <-> Guest conversion */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;
        else
                return pfn_to_mfn(pfn);
}

static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return gfn;
        else
                return mfn_to_pfn(gfn);
}

/* Pseudo-physical <-> Bus conversion */
#define pfn_to_bfn(pfn)         pfn_to_gfn(pfn)
#define bfn_to_pfn(bfn)         gfn_to_pfn(bfn)

/*
 * We detect special mappings in one of two ways:
 * 1. If the MFN is an I/O page then Xen will set the m2p entry
 *    to be outside our maximum possible pseudophys range.
 * 2. If the MFN belongs to a different domain then we will certainly
 *    not have MFN in our p2m table. Conversely, if the page is ours,
 *    then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
        unsigned long pfn;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        pfn = mfn_to_pfn(mfn);
        if (__pfn_to_mfn(pfn) != mfn)
                return -1; /* force !pfn_valid() */
        return pfn;
}
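
/*
 * Usage sketch (assumption, not from the original header): a driver that
 * receives a bus frame number and needs a local struct page can use the
 * check above as a guard:
 *
 *      pfn = bfn_to_local_pfn(bfn);
 *      if (pfn_valid(pfn))
 *              page = pfn_to_page(pfn);    frame is local RAM
 *      else
 *              ... foreign or I/O frame, no struct page ...
 */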

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)      (phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)          (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)          (__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* VIRT <-> GUEST conversion */
#define virt_to_gfn(v)          (pfn_to_gfn(virt_to_pfn(v)))
#define gfn_to_virt(g)          (__va(gfn_to_pfn(g) << PAGE_SHIFT))
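
/*
 * Illustrative sketch (assumption): virt_to_gfn() is how a kernel page is
 * typically named to the hypervisor, e.g. when granting it to another
 * domain; gnttab_grant_foreign_access() is assumed from <xen/grant_table.h>
 * and otherend_id is a hypothetical peer domain id:
 *
 *      void *ring = (void *)__get_free_page(GFP_KERNEL);
 *      int ref = gnttab_grant_foreign_access(otherend_id,
 *                                            virt_to_gfn(ring), 0);
 */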

static inline unsigned long pte_mfn(pte_t pte)
{
        return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        pte_t pte;

        pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
                        massage_pgprot(pgprot);

        return pte;
}
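
/*
 * Sketch (assumption, not from the original header): mfn_pte() builds a PTE
 * holding a *machine* frame number, so the pair round-trips as long as the
 * mfn fits in PTE_PFN_MASK:
 *
 *      pte_t pte = mfn_pte(mfn, PAGE_KERNEL);
 *      BUG_ON(pte_mfn(pte) != mfn);
 */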

static inline pteval_t pte_val_ma(pte_t pte)
{
        return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
        return (pte_t) { .pte = x };
}

#define pmd_val_ma(v)   ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v)   ((v).p4d.pgd.pgd)
#else
#define pud_val_ma(v)   ((v).pud)
#endif
#define __pmd_ma(x)     ((pmd_t) { (x) } )

#ifdef __PAGETABLE_P4D_FOLDED
#define p4d_val_ma(x)   ((x).pgd.pgd)
#else
#define p4d_val_ma(x)   ((x).p4d)
#endif

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))
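
/*
 * Usage sketch (assumption, not from the original header): xen_remap() and
 * xen_unmap() bracket access to a hypervisor-provided machine region, e.g.
 * a shared page at machine address maddr (a hypothetical value):
 *
 *      void __iomem *vaddr = xen_remap(maddr, PAGE_SIZE);
 *      if (!vaddr)
 *              return -ENOMEM;
 *      ...
 *      xen_unmap(vaddr);
 */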

static inline bool xen_arch_need_swiotlb(struct device *dev,
                                         phys_addr_t phys,
                                         dma_addr_t dev_addr)
{
        return false;
}

static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        return __get_free_pages(__GFP_NOWARN, order);
}

#endif /* _ASM_X86_XEN_PAGE_H */