/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;

static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				   struct page *topage)
{
	copy_page(to, from);
}

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

#ifndef __pa
#define __pa(x)		__phys_addr((unsigned long)(x))
#endif

#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic. */
/*
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during __pa() calculation and can optimize it unexpectedly.
 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
 * case properly. Once all supported versions of gcc understand it, we can
 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
 */
#define __pa_symbol(x) \
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
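/*
 * Illustrative usage sketch: per the comments above, __pa_symbol() is the
 * form meant for C/linker-visible symbols (e.g. _text), while __pa() handles
 * ordinary direct-mapped kernel pointers.  The local variable names below
 * are hypothetical, for illustration only.
 *
 *	phys_addr_t text_phys = __pa_symbol(_text);	// kernel symbol
 *	void *buf = kmalloc(64, GFP_KERNEL);		// hypothetical buffer
 *	phys_addr_t buf_phys = buf ? __pa(buf) : 0;	// direct-mapped pointer
 */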

#ifndef __va
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
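/*
 * Illustrative sketch: __va() adds PAGE_OFFSET to map a physical address into
 * the kernel's direct mapping, and __pa() is its inverse for addresses inside
 * that mapping, so for a direct-mapped physical address (the hypothetical
 * variable "paddr" below) the round trip is an identity:
 *
 *	void *vaddr = __va(paddr);
 *	BUG_ON(__pa(vaddr) != paddr);
 */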

#define __boot_va(x)		__va(x)
#define __boot_pa(x)		__pa(x)

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
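/*
 * Usage sketch (illustrative, with a hypothetical helper name): the comment
 * above states the contract -- only addresses for which virt_addr_valid()
 * returns true may be handed to virt_to_page().
 *
 *	static inline struct page *kaddr_to_page_checked(const void *kaddr)
 *	{
 *		if (!virt_addr_valid(kaddr))
 *			return NULL;
 *		return virt_to_page(kaddr);
 *	}
 */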

#endif	/* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif	/* __KERNEL__ */
#endif	/* _ASM_X86_PAGE_H */