/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/fixmap.h>

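/*
 * Initial kernel page tables, set up by the early boot code; each level
 * holds 512 entries on x86-64.
 */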
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];

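/*
 * The kernel runs on init_top_pgt directly, so swapper_pg_dir is merely an
 * alias and there is no separate initial page table to synchronize.
 */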
#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
static inline void sync_initial_page_table(void) { }

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

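/*
 * Page-table entries are written with WRITE_ONCE() so each update is a
 * single, non-torn store: the hardware page walker and lockless software
 * walkers such as get_user_pages_fast() may read them concurrently.
 */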
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	native_set_pte(ptep, native_make_pte(0));
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

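/*
 * On SMP, the *_get_and_clear() helpers below clear the entry with xchg()
 * so the old value is returned atomically and a concurrent hardware A/D
 * bit update cannot be lost between the read and the clear.
 */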
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * native_local_ptep_get_and_clear(),
	 * but duplicated because of cyclic dependency
	 */
	pte_t ret = *xp;

	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * native_local_pmdp_get_and_clear(),
	 * but duplicated because of cyclic dependency
	 */
	pmd_t ret = *xp;

	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * native_local_pudp_get_and_clear(),
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

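/*
 * With 4-level paging the p4d is folded into the pgd, so a p4d write is
 * really a top-level write and must go through the PTI user-pagetable
 * fixup; with 5-level paging the fixup happens in native_set_pgd().
 */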
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
	WRITE_ONCE(p4dp->pgd, pti_set_user_pgtbl(&p4dp->pgd, p4d.pgd));
#else
	WRITE_ONCE(*p4dp, p4d);
#endif
}

static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	native_set_p4d(p4d, native_make_p4d(0));
#else
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0) });
#endif
}

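/*
 * Under PTI, pti_set_user_pgtbl() mirrors userspace entries into the user
 * copy of the page tables and returns the value to install in the kernel
 * copy.
 */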
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
#else
	WRITE_ONCE(*pgdp, pgd);
#endif
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

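/* Propagate changes to the kernel half of the address space to all pgds. */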
extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte)) /* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...             | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...             |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) |  ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the high bits for type */
#define __swp_type(x)		((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x)		(~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again.
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((unsigned long)(type) << (64 - SWP_TYPE_BITS)) })
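
/*
 * Worked example (illustrative only): with SWP_TYPE_BITS = 5 and
 * _PAGE_BIT_PROTNONE = 8, SWP_OFFSET_FIRST_BIT is 9 and SWP_OFFSET_SHIFT
 * is 14, so __swp_entry() stores the type in bits 59-63 and ~offset in
 * bits 9-58:
 *
 *	val = (~offset << 14 >> 5) | (type << 59)
 *
 * __swp_type() recovers the type as val >> 59, and __swp_offset() shifts
 * the type bits off the top of ~val before shifting the offset back down:
 *
 *	~val << 5 >> 14 == offset
 *
 * Inverting the offset keeps the high physical-address bits set in these
 * not-present PTEs, which is the L1TF mitigation.
 */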

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x)		((pmd_t) { .pmd = (x).val })

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP	1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v)	((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o)	((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

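/*
 * Sanity check for get_user_pages_fast(): reject ranges that wrap around
 * zero or extend beyond the user portion of the virtual address space.
 */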
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
				      int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		return false;
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

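/* Helpers that invert not-present PTEs for the L1TF mitigation. */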
#include <asm/pgtable-invert.h>

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */