/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/device.h>

#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
        phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
        phys_addr_t paddr;
} xpaddr_t;

#ifdef CONFIG_X86_64
#define XEN_PHYSICAL_MASK       __sme_clr((1UL << 52) - 1)
#else
#define XEN_PHYSICAL_MASK       __PHYSICAL_MASK
#endif

#define XEN_PTE_MFN_MASK        ((pteval_t)(((signed long)PAGE_MASK) & \
                                            XEN_PHYSICAL_MASK))

#define XMADDR(x)       ((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)       ((xpaddr_t) { .paddr = (x) })

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY       (~0UL)
#define FOREIGN_FRAME_BIT       (1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT      (1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)        ((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)       ((m) | IDENTITY_FRAME_BIT)

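/*
 * A worked example of the indicator bits (illustrative, assuming
 * BITS_PER_LONG == 64): the top two bits of a p2m entry are flags,
 * not part of the frame number itself.
 *
 *   FOREIGN_FRAME(0x1234)  == 0x1234 | (1UL << 63)  frame granted by
 *                                                   another domain
 *   IDENTITY_FRAME(0x1234) == 0x1234 | (1UL << 62)  identity-mapped frame
 *
 * Clearing both bits, as pfn_to_mfn() below does, recovers the raw
 * MFN 0x1234.
 */
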
#define P2M_PER_PAGE    (PAGE_SIZE / sizeof(unsigned long))

extern unsigned long *machine_to_phys_mapping;
extern unsigned long machine_to_phys_nr;
extern unsigned long *xen_p2m_addr;
extern unsigned long xen_p2m_size;
extern unsigned long xen_max_p2m_pfn;

extern int xen_alloc_p2m_entry(unsigned long pfn);

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                                                    unsigned long pfn_e);

#ifdef CONFIG_XEN_PV
extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                                   struct gnttab_map_grant_ref *kmap_ops,
                                   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                                     struct gnttab_unmap_grant_ref *kunmap_ops,
                                     struct page **pages, unsigned int count);
#else
static inline int
set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                        struct gnttab_map_grant_ref *kmap_ops,
                        struct page **pages, unsigned int count)
{
        return 0;
}

static inline int
clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                          struct gnttab_unmap_grant_ref *kunmap_ops,
                          struct page **pages, unsigned int count)
{
        return 0;
}
#endif

/*
 * Helper functions to write or read unsigned long values to/from
 * memory, when the access may fault.
 */
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
        return __put_user(val, (unsigned long __user *)addr);
}

static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
{
        return __get_user(*val, (unsigned long __user *)addr);
}

#ifdef CONFIG_XEN_PV
/*
 * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
 * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
 *   bits (identity or foreign) are set.
 * - __pfn_to_mfn() returns the found entry of the p2m table. A possibly set
 *   identity or foreign indicator will still be set. __pfn_to_mfn()
 *   encapsulates get_phys_to_machine(), which is called in special cases only.
 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
 *   cases needing extended handling.
 */
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn;

        if (pfn < xen_p2m_size)
                mfn = xen_p2m_addr[pfn];
        else if (unlikely(pfn < xen_max_p2m_pfn))
                return get_phys_to_machine(pfn);
        else
                return IDENTITY_FRAME(pfn);

        if (unlikely(mfn == INVALID_P2M_ENTRY))
                return get_phys_to_machine(pfn);

        return mfn;
}
#else
static inline unsigned long __pfn_to_mfn(unsigned long pfn)
{
        return pfn;
}
#endif

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn;

        /*
         * Some x86 code is still using pfn_to_mfn instead of
         * pfn_to_gfn. This will have to be cleaned up once we figure
         * out which callers need which.
         */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;

        mfn = __pfn_to_mfn(pfn);

        if (mfn != INVALID_P2M_ENTRY)
                mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

        return mfn;
}

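/*
 * An illustrative sketch (hypothetical helper, not part of the kernel API):
 * the difference between the raw and the cooked lookup. __pfn_to_mfn()
 * keeps the indicator bits, so a caller can still tell that a frame is
 * foreign; pfn_to_mfn() strips them and yields only the bare MFN (or
 * INVALID_P2M_ENTRY).
 */
static inline bool example_pfn_is_foreign(unsigned long pfn)
{
        /* The FOREIGN_FRAME_BIT survives only the raw __pfn_to_mfn() path. */
        return (__pfn_to_mfn(pfn) & FOREIGN_FRAME_BIT) != 0;
}
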
static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 1;

        return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
        unsigned long pfn;
        int ret;

        if (unlikely(mfn >= machine_to_phys_nr))
                return ~0;

        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
        ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
        if (ret < 0)
                return ~0;

        return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
        unsigned long pfn;

        /*
         * Some x86 code is still using mfn_to_pfn instead of
         * gfn_to_pfn. This will have to be cleaned up once we figure
         * out which callers need which.
         */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        pfn = mfn_to_pfn_no_overrides(mfn);
        if (__pfn_to_mfn(pfn) != mfn)
                pfn = ~0;

        /*
         * pfn is ~0 if there are no entries in the m2p for mfn or the
         * entry doesn't map back to the mfn.
         */
        if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
                pfn = mfn;

        return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
        unsigned offset = phys.paddr & ~PAGE_MASK;
        return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
        unsigned offset = machine.maddr & ~PAGE_MASK;
        return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}

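/*
 * A worked example (illustrative values, 4 KiB pages): for
 * phys.paddr == 0x12345, PFN_DOWN(0x12345) == 0x12 and offset == 0x345.
 * If the p2m maps pfn 0x12 to mfn 0xabcd, the resulting machine address
 * is PFN_PHYS(0xabcd) | 0x345 == 0xabcd345: only the frame number is
 * translated, the offset within the page is preserved.
 */
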
/* Pseudo-physical <-> Guest conversion */
static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;
        else
                return pfn_to_mfn(pfn);
}

static inline unsigned long gfn_to_pfn(unsigned long gfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return gfn;
        else
                return mfn_to_pfn(gfn);
}

/* Pseudo-physical <-> Bus conversion */
#define pfn_to_bfn(pfn)         pfn_to_gfn(pfn)
#define bfn_to_pfn(bfn)         gfn_to_pfn(bfn)

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN)) == MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
{
        unsigned long pfn;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        pfn = mfn_to_pfn(mfn);
        if (__pfn_to_mfn(pfn) != mfn)
                return -1; /* force !pfn_valid() */
        return pfn;
}

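/*
 * An illustrative round trip (hypothetical values): suppose mfn 0x500 is a
 * frame granted to us by another domain and mapped into our p2m at pfn 0x42
 * as FOREIGN_FRAME(0x500). The m2p entry for 0x500 still belongs to the
 * owning domain, so mfn_to_pfn(0x500) fails the p2m(m2p(MFN)) == MFN check
 * and yields ~0; bfn_to_local_pfn() then returns -1 and pfn_valid() fails,
 * as intended. Only for a frame in our own reservation does the round trip
 * hold and the local pfn come back.
 */
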
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)      (phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)          (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)          (__va(mfn_to_pfn(m) << PAGE_SHIFT))

/* VIRT <-> GUEST conversion */
#define virt_to_gfn(v)          (pfn_to_gfn(virt_to_pfn(v)))
#define gfn_to_virt(g)          (__va(gfn_to_pfn(g) << PAGE_SHIFT))

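/*
 * An illustrative use (a sketch with a hypothetical helper, not part of the
 * kernel API): code handing a directly-mapped kernel buffer to the
 * hypervisor or to another domain describes it by guest frame number rather
 * than by kernel virtual address.
 */
static inline unsigned long example_buffer_to_gfn(void *buf)
{
        /* buf must lie in the direct mapping for __pa()/virt_to_pfn(). */
        return virt_to_gfn(buf);
}
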
static inline unsigned long pte_mfn(pte_t pte)
{
        return (pte.pte & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        pte_t pte;

        pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
                        massage_pgprot(pgprot);

        return pte;
}

static inline pteval_t pte_val_ma(pte_t pte)
{
        return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
        return (pte_t) { .pte = x };
}

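/*
 * An illustrative identity (assuming the frame number fits under
 * XEN_PTE_MFN_MASK and the pgprot contributes no bits that survive the
 * mask): pte_mfn(mfn_pte(m, prot)) == m, since mfn_pte() places the frame
 * number at PAGE_SHIFT and pte_mfn() masks the protection and SME/NX bits
 * back off.
 */
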
#define pmd_val_ma(v) ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v) ((v).p4d.pgd.pgd)
#else
#define pud_val_ma(v) ((v).pud)
#endif
#define __pmd_ma(x)     ((pmd_t) { (x) } )

#ifdef __PAGETABLE_P4D_FOLDED
#define p4d_val_ma(x)   ((x).pgd.pgd)
#else
#define p4d_val_ma(x)   ((x).p4d)
#endif

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

/* No trailing semicolon here: it would break xen_remap() in expressions. */
#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))

static inline bool xen_arch_need_swiotlb(struct device *dev,
                                         phys_addr_t phys,
                                         dma_addr_t dev_addr)
{
        return false;
}

static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        return __get_free_pages(__GFP_NOWARN, order);
}

#endif /* _ASM_X86_XEN_PAGE_H */