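/*
 * Conversions between Xen machine frame numbers (MFNs) and the kernel's
 * pseudo-physical frame numbers (PFNs) on x86, plus helpers for building
 * page table entries that hold machine addresses.
 */
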
#ifndef _ASM_X86_XEN_PAGE_H
#define _ASM_X86_XEN_PAGE_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/features.h>

/* Xen machine address */
typedef struct xmaddr {
        phys_addr_t maddr;
} xmaddr_t;

/* Xen pseudo-physical address */
typedef struct xpaddr {
        phys_addr_t paddr;
} xpaddr_t;

#define XMADDR(x)       ((xmaddr_t) { .maddr = (x) })
#define XPADDR(x)       ((xpaddr_t) { .paddr = (x) })

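/*
 * The wrapper structs exist purely for type safety: machine and
 * pseudo-physical addresses are both plain integers, and giving them
 * distinct C types lets the compiler reject accidental mixing of the two
 * address spaces. A minimal sketch (the address value is made up):
 *
 *      xmaddr_t ma = XMADDR(0x100000UL);
 *      xpaddr_t pa = XPADDR(0x100000UL);
 *
 * Assigning ma to pa (or passing one where the other is expected) will not
 * compile, whereas two bare phys_addr_t values would mix silently.
 */
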
/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY       (~0UL)
#define FOREIGN_FRAME_BIT       (1UL<<(BITS_PER_LONG-1))
#define IDENTITY_FRAME_BIT      (1UL<<(BITS_PER_LONG-2))
#define FOREIGN_FRAME(m)        ((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)       ((m) | IDENTITY_FRAME_BIT)

/* Maximum amount of memory we can handle in a domain, in pages */
#define MAX_DOMAIN_PAGES                                                \
        ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
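
/*
 * Worked example, assuming CONFIG_XEN_MAX_DOMAIN_MEMORY=128 (GiB) and a
 * 4 KiB PAGE_SIZE: 128 * 2^30 / 2^12 = 2^25 = 33554432 pages. The
 * intermediate multiply is done as u64 so it cannot overflow an unsigned
 * long on 32-bit builds.
 */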

extern unsigned long *machine_to_phys_mapping;
extern unsigned long  machine_to_phys_nr;

extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
                                             unsigned long pfn_e);

extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                                   struct gnttab_map_grant_ref *kmap_ops,
                                   struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                                     struct gnttab_map_grant_ref *kmap_ops,
                                     struct page **pages, unsigned int count);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
        unsigned long mfn;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;

        mfn = get_phys_to_machine(pfn);

        if (mfn != INVALID_P2M_ENTRY)
                mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);

        return mfn;
}
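
/*
 * Typical use of pfn_to_mfn(): translating a kernel-owned page into the
 * machine frame a hypercall or backend expects. A hedged sketch ('page'
 * is a hypothetical struct page the caller already holds):
 *
 *      unsigned long mfn = pfn_to_mfn(page_to_pfn(page));
 *      if (mfn == INVALID_P2M_ENTRY)
 *              return -EFAULT;         (no machine frame backs this pfn)
 */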

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 1;

        return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
}

static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
{
        unsigned long pfn;
        int ret;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        if (unlikely(mfn >= machine_to_phys_nr))
                return ~0;

        /*
         * The array access can fail (e.g., device space beyond end of RAM).
         * In such cases it doesn't matter what we return (we return garbage),
         * but we must handle the fault without crashing!
         */
        ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
        if (ret < 0)
                return ~0;

        return pfn;
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
        unsigned long pfn;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        pfn = mfn_to_pfn_no_overrides(mfn);
        if (get_phys_to_machine(pfn) != mfn) {
                /*
                 * If this appears to be a foreign mfn (because the pfn
                 * doesn't map back to the mfn), then check the local override
                 * table to see if there's a better pfn to use.
                 *
                 * m2p_find_override_pfn returns ~0 if it doesn't find anything.
                 */
                pfn = m2p_find_override_pfn(mfn, ~0);
        }

        /*
         * pfn is ~0 if there are no entries in the m2p for mfn or if the
         * entry doesn't map back to the mfn and m2p_override doesn't have a
         * valid entry for it.
         */
        if (pfn == ~0 && get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn))
                pfn = mfn;

        return pfn;
}

static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
        unsigned offset = phys.paddr & ~PAGE_MASK;
        return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
}

static inline xpaddr_t machine_to_phys(xmaddr_t machine)
{
        unsigned offset = machine.maddr & ~PAGE_MASK;
        return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
}
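
/*
 * The two helpers above preserve the sub-page offset, so for frames in our
 * own p2m they invert each other. An illustrative sketch ('paddr' is a
 * hypothetical pseudo-physical address of local RAM):
 *
 *      xmaddr_t ma = phys_to_machine(XPADDR(paddr));
 *      xpaddr_t pa = machine_to_phys(ma);
 *      BUG_ON(pa.paddr != paddr);      (holds only for pages we own)
 */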

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
        unsigned long pfn;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        pfn = mfn_to_pfn(mfn);
        if (get_phys_to_machine(pfn) != mfn)
                return -1; /* force !pfn_valid() */
        return pfn;
}
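
/*
 * Usage sketch for mfn_to_local_pfn(): deciding whether a machine frame
 * belongs to this domain before touching its struct page ('mfn' is a
 * hypothetical frame handed to us, e.g. by a device backend):
 *
 *      unsigned long pfn = mfn_to_local_pfn(mfn);
 *      if (!pfn_valid(pfn))
 *              return -EINVAL;         (foreign or special mapping)
 *      page = pfn_to_page(pfn);
 */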

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)      (phys_to_machine(XPADDR(__pa(v))))
#define virt_to_pfn(v)          (PFN_DOWN(__pa(v)))
#define virt_to_mfn(v)          (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m)          (__va(mfn_to_pfn(m) << PAGE_SHIFT))
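
/*
 * These macros cover the common case of handing the hypervisor the machine
 * address of a kernel object. A minimal sketch ('shared' is a hypothetical
 * page-aligned kernel allocation passed to a hypercall):
 *
 *      unsigned long mfn = virt_to_mfn(shared);
 *      xmaddr_t ma = virt_to_machine(shared);  (keeps the sub-page offset)
 */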

static inline unsigned long pte_mfn(pte_t pte)
{
        return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        pte_t pte;

        pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
                  massage_pgprot(pgprot);

        return pte;
}
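
/*
 * mfn_pte() builds a PTE directly from a machine frame, unlike pfn_pte(),
 * which under Xen PV translates its pfn through the p2m first. A sketch,
 * assuming a hypothetical foreign 'mfn' that should be mapped read/write:
 *
 *      pte_t pte = mfn_pte(mfn, PAGE_KERNEL);
 */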

static inline pteval_t pte_val_ma(pte_t pte)
{
        return pte.pte;
}

static inline pte_t __pte_ma(pteval_t x)
{
        return (pte_t) { .pte = x };
}

#define pmd_val_ma(v)   ((v).pmd)
#ifdef __PAGETABLE_PUD_FOLDED
#define pud_val_ma(v)   ((v).pgd.pgd)
#else
#define pud_val_ma(v)   ((v).pud)
#endif
#define __pmd_ma(x)     ((pmd_t) { (x) })

#define pgd_val_ma(x)   ((x).pgd)

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);

xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);

#define xen_remap(cookie, size) ioremap((cookie), (size))
#define xen_unmap(cookie) iounmap((cookie))
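
/*
 * On x86 these simply wrap ioremap()/iounmap(); other architectures provide
 * their own definitions. An illustrative sketch ('maddr' is a hypothetical
 * machine address of a shared page obtained from the toolstack):
 *
 *      void __iomem *vaddr = xen_remap(maddr, PAGE_SIZE);
 *      if (!vaddr)
 *              return -ENOMEM;
 *      ...
 *      xen_unmap(vaddr);
 */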

static inline bool xen_arch_need_swiotlb(struct device *dev,
                                         unsigned long pfn,
                                         unsigned long mfn)
{
        return false;
}

#endif /* _ASM_X86_XEN_PAGE_H */