]>
Commit | Line | Data |
---|---|---|
ab537dca AK |
1 | #ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H |
2 | #define _ASM_POWERPC_BOOK3S_64_HASH_4K_H | |
/*
 * Entries per page directory level.  The PTE level must use a 64b record
 * for each page table entry.  The PMD and PGD level use a 32b record for
 * each entry by assuming that each entry is page aligned.
 */
#define PTE_INDEX_SIZE  9
#define PMD_INDEX_SIZE  7
#define PUD_INDEX_SIZE  9
#define PGD_INDEX_SIZE  9

#ifndef __ASSEMBLY__
/* Byte size of one fully populated table at each level. */
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

/* Number of entries per table at each level (2^INDEX_SIZE). */
#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* With 4k base page size, hugepage PTEs go at the PMD level */
#define MIN_HUGEPTE_SHIFT	PMD_SHIFT

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS		0
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0
/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS	(_PAGE_BUSY | _PAGE_HASHPTE | \
			 _PAGE_F_SECOND | _PAGE_F_GIX)

/* shift to put page number into pte */
#define PTE_RPN_SHIFT	(17)

/* NOTE(review): defined to 0 here; presumably only meaningful with a
 * 64K base page size — confirm against the 64K variant of this header. */
#define _PAGE_4K_PFN	0
ab537dca AK |
58 | #ifndef __ASSEMBLY__ |
59 | /* | |
60 | * 4-level page tables related bits | |
61 | */ | |
62 | ||
63 | #define pgd_none(pgd) (!pgd_val(pgd)) | |
64 | #define pgd_bad(pgd) (pgd_val(pgd) == 0) | |
65 | #define pgd_present(pgd) (pgd_val(pgd) != 0) | |
ab537dca AK |
66 | #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) |
67 | ||
f281b5d5 AK |
68 | static inline void pgd_clear(pgd_t *pgdp) |
69 | { | |
70 | *pgdp = __pgd(0); | |
71 | } | |
72 | ||
ab537dca AK |
73 | static inline pte_t pgd_pte(pgd_t pgd) |
74 | { | |
75 | return __pte(pgd_val(pgd)); | |
76 | } | |
77 | ||
78 | static inline pgd_t pte_pgd(pte_t pte) | |
79 | { | |
80 | return __pgd(pte_val(pte)); | |
81 | } | |
82 | extern struct page *pgd_page(pgd_t pgd); | |
83 | ||
84 | #define pud_offset(pgdp, addr) \ | |
85 | (((pud_t *) pgd_page_vaddr(*(pgdp))) + \ | |
86 | (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))) | |
87 | ||
88 | #define pud_ERROR(e) \ | |
89 | pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) | |
90 | ||
91 | /* | |
92 | * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ | |
93 | #define remap_4k_pfn(vma, addr, pfn, prot) \ | |
94 | remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) | |
95 | ||
96 | #endif /* !__ASSEMBLY__ */ | |
97 | ||
98 | #endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */ |