#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_top_pgt[];

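/*
 * swapper_pg_dir is the kernel's generic name for the initial top-level
 * page table; on x86-64 it simply aliases init_top_pgt.
 */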
#define swapper_pg_dir init_top_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

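/*
 * Atomically read and clear a page-table entry.  On SMP the xchg() is
 * needed so that a concurrent hardware update of the accessed/dirty
 * bits on another CPU cannot be lost between the read and the clear;
 * the pmd/pud variants below follow the same pattern.
 */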
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/* native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

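/*
 * Illustration (addresses hypothetical): a kernel PGD page at
 * 0xffff880000004000 has its user counterpart at 0xffff880000005000;
 * toggling bit PTI_PGTABLE_SWITCH_BIT (bit 12 with 4k pages) converts
 * one pointer into the other.
 */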
/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
}
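/* With 4k pages and 8-byte entries, that is PGD slots 0-255. */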

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);

/*
 * Takes a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user copy and returns the resulting PGD that must be set
 * in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgd(pgdp, pgd);
}
#else
static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif

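/*
 * Without CONFIG_X86_5LEVEL the p4d level is folded into the pgd, so
 * the PTI user-copy fixup has to be applied at this level too.
 */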
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
	p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
#else
	*p4dp = p4d;
#endif
}

static inline void native_p4d_clear(p4d_t *p4d)
{
#ifdef CONFIG_X86_5LEVEL
	native_set_p4d(p4d, native_make_p4d(0));
#else
	native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)});
#endif
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	*pgdp = pti_set_user_pgd(pgdp, pgd);
#else
	*pgdp = pgd;
#endif
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

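/*
 * Propagate changes made to the kernel half of the init_mm page tables
 * in [start, end] to every pgd in the system.
 */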
extern void sync_global_pgds(unsigned long start, unsigned long end);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))	/* NOP */

/*
 * Encode and de-code a swap entry
 *
 * |     ...         | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
 * |     ...         |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
 * | OFFSET (14->63) |  TYPE (9-13)   |0|X|X|X| X| X|X|X|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (SWP_TYPE_FIRST_BIT)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (SWP_TYPE_FIRST_BIT)) \
					 | ((offset) << SWP_OFFSET_FIRST_BIT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
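/*
 * Worked example: __swp_entry(1, 2) is (1 << 9) | (2 << 14) == 0x8200,
 * i.e. the type lands in bits 9-13 and the offset starts at bit 14.
 */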

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

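/* The struct page array (virtual memmap) is mapped at VMEMMAP_START. */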
#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

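/*
 * Used by the fast GUP path: reject ranges that wrap around zero or
 * extend past the virtual address width (__VIRTUAL_MASK_SHIFT), since
 * no valid user mapping can live there.
 */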
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
				      int write)
{
	unsigned long len, end;

	len = (unsigned long)nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		return false;
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */