#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

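/*
 * Illustrative sketch (not part of this header): a typical caller builds
 * a PTE from a struct page and can recover the page from the PTE again.
 * 'page' and 'vma' are hypothetical locals standing in for fault state.
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	struct page *pg = pte_page(pte);	(pg == page)
 */
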
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

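/*
 * Illustrative sketch (assumption, not code from this file): an
 * anonymous read fault can map the shared zero page read-only, so
 * untouched pages cost no real memory until the first write.
 *
 *	struct page *zpage = ZERO_PAGE(address);
 *	(map zpage read-only; a later write fault allocates a real page)
 */
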
extern pgd_t swapper_pg_dir[];

void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

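/*
 * Illustrative call site (a sketch of the generic fault path, not code
 * from this file): the hook runs right after the new PTE is installed,
 * giving hash-MMU machines a chance to preload the HPTE.
 *
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, ptep);
 */
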
extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift);
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       bool *is_thp, unsigned *shift)
{
	VM_WARN(!arch_irqs_disabled(),
		"%s called with irq enabled\n", __func__);
	return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
}
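/*
 * Illustrative usage (assumption, not code from this file): the returned
 * pointer is only stable while interrupts are off, hence the VM_WARN
 * above. A caller typically brackets the walk like this:
 *
 *	local_irq_save(flags);
 *	ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &is_thp, &shift);
 *	if (ptep)
 *		pte = READ_ONCE(*ptep);
 *	local_irq_restore(flags);
 */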

unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */