/*
 * Hash MMU (HPT) definitions for 64-bit Book3S processors.
 * Common PTE bits and helpers shared by the 4K and 64K hash
 * page-table layouts.
 */
1 | #ifndef _ASM_POWERPC_BOOK3S_64_HASH_H |
2 | #define _ASM_POWERPC_BOOK3S_64_HASH_H | |
c605782b BH |
3 | #ifdef __KERNEL__ |
4 | ||
e34aa03c AK |
5 | /* |
6 | * Common bits between 4K and 64K pages in a linux-style PTE. | |
1ec3f937 | 7 | * Additional bits may be defined in pgtable-hash64-*.h |
e34aa03c | 8 | * |
e34aa03c | 9 | */ |
d2cf0050 | 10 | #define H_PTE_NONE_MASK _PAGE_HPTEFLAGS |
6aa59f51 AK |
11 | #define H_PAGE_F_GIX_SHIFT 56 |
12 | #define H_PAGE_BUSY _RPAGE_RSV1 /* software: PTE & hash are busy */ | |
13 | #define H_PAGE_F_SECOND _RPAGE_RSV2 /* HPTE is in 2ndary HPTEG */ | |
14 | #define H_PAGE_F_GIX (_RPAGE_RSV3 | _RPAGE_RSV4 | _RPAGE_RPN44) | |
15 | #define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */ | |
e34aa03c | 16 | |
371352ca AK |
17 | #ifdef CONFIG_PPC_64K_PAGES |
18 | #include <asm/book3s/64/hash-64k.h> | |
19 | #else | |
20 | #include <asm/book3s/64/hash-4k.h> | |
21 | #endif | |
22 | ||
23 | /* | |
24 | * Size of EA range mapped by our pagetables. | |
25 | */ | |
dd1842a2 AK |
26 | #define H_PGTABLE_EADDR_SIZE (H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \ |
27 | H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT) | |
28 | #define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE) | |
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * only with hash 64k we need to use the second half of pmd page table
 * to store pointer to deposited pgtable_t
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)
/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	(H_KERN_VIRT_SIZE >> 1)
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)

/*
 * Region IDs: the top nibble of an effective address selects the region.
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

/*
 * Defines the address of the vmemap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */
/* PTEIDX nibble: secondary-group flag plus 3-bit index within the HPTE group. */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

/*
 * A pmd/pud must point at a properly aligned page table; any of the
 * low (TABLE_SIZE - 1) bits being set marks the entry as bad.
 */
#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
85 | #ifndef __ASSEMBLY__ | |
ac94ac79 AK |
86 | #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS) |
87 | #define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS) | |
88 | static inline int hash__pgd_bad(pgd_t pgd) | |
89 | { | |
90 | return (pgd_val(pgd) == 0); | |
91 | } | |
cd65d697 BS |
92 | #ifdef CONFIG_STRICT_KERNEL_RWX |
93 | extern void hash__mark_rodata_ro(void); | |
94 | #endif | |
371352ca AK |
95 | |
96 | extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |
97 | pte_t *ptep, unsigned long pte, int huge); | |
c6a3c495 | 98 | extern unsigned long htab_convert_pte_flags(unsigned long pteflags); |
371352ca | 99 | /* Atomic PTE updates */ |
ac94ac79 AK |
100 | static inline unsigned long hash__pte_update(struct mm_struct *mm, |
101 | unsigned long addr, | |
102 | pte_t *ptep, unsigned long clr, | |
103 | unsigned long set, | |
104 | int huge) | |
371352ca | 105 | { |
5dc1ef85 AK |
106 | __be64 old_be, tmp_be; |
107 | unsigned long old; | |
371352ca AK |
108 | |
109 | __asm__ __volatile__( | |
110 | "1: ldarx %0,0,%3 # pte_update\n\ | |
5dc1ef85 | 111 | and. %1,%0,%6\n\ |
371352ca AK |
112 | bne- 1b \n\ |
113 | andc %1,%0,%4 \n\ | |
114 | or %1,%1,%7\n\ | |
115 | stdcx. %1,0,%3 \n\ | |
116 | bne- 1b" | |
5dc1ef85 AK |
117 | : "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep) |
118 | : "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep), | |
945537df | 119 | "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set)) |
371352ca AK |
120 | : "cc" ); |
121 | /* huge pages use the old page table lock */ | |
122 | if (!huge) | |
123 | assert_pte_locked(mm, addr); | |
124 | ||
5dc1ef85 | 125 | old = be64_to_cpu(old_be); |
945537df | 126 | if (old & H_PAGE_HASHPTE) |
371352ca AK |
127 | hpte_need_flush(mm, addr, ptep, old, huge); |
128 | ||
129 | return old; | |
130 | } | |
131 | ||
371352ca AK |
132 | /* Set the dirty and/or accessed bits atomically in a linux PTE, this |
133 | * function doesn't need to flush the hash entry | |
134 | */ | |
ac94ac79 | 135 | static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry) |
371352ca | 136 | { |
5dc1ef85 AK |
137 | __be64 old, tmp, val, mask; |
138 | ||
c7d54842 | 139 | mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE | |
5dc1ef85 | 140 | _PAGE_EXEC | _PAGE_SOFT_DIRTY); |
371352ca | 141 | |
5dc1ef85 | 142 | val = pte_raw(entry) & mask; |
371352ca AK |
143 | |
144 | __asm__ __volatile__( | |
145 | "1: ldarx %0,0,%4\n\ | |
5dc1ef85 | 146 | and. %1,%0,%6\n\ |
371352ca AK |
147 | bne- 1b \n\ |
148 | or %0,%3,%0\n\ | |
149 | stdcx. %0,0,%4\n\ | |
150 | bne- 1b" | |
151 | :"=&r" (old), "=&r" (tmp), "=m" (*ptep) | |
945537df | 152 | :"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY)) |
371352ca AK |
153 | :"cc"); |
154 | } | |
155 | ||
ac94ac79 | 156 | static inline int hash__pte_same(pte_t pte_a, pte_t pte_b) |
368ced78 | 157 | { |
ac94ac79 | 158 | return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0); |
368ced78 AK |
159 | } |
160 | ||
ac94ac79 | 161 | static inline int hash__pte_none(pte_t pte) |
ee3caed3 | 162 | { |
ac94ac79 | 163 | return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0; |
ee3caed3 ME |
164 | } |
165 | ||
1ca72129 AK |
166 | /* This low level function performs the actual PTE insertion |
167 | * Setting the PTE depends on the MMU type and other factors. It's | |
168 | * an horrible mess that I'm not going to try to clean up now but | |
169 | * I'm keeping it in one place rather than spread around | |
170 | */ | |
ac94ac79 AK |
171 | static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr, |
172 | pte_t *ptep, pte_t pte, int percpu) | |
1ca72129 AK |
173 | { |
174 | /* | |
175 | * Anything else just stores the PTE normally. That covers all 64-bit | |
176 | * cases, and 32-bit non-hash with 32-bit PTEs. | |
177 | */ | |
178 | *ptep = pte; | |
179 | } | |
180 | ||
371352ca AK |
181 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
182 | extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, | |
183 | pmd_t *pmdp, unsigned long old_pmd); | |
184 | #else | |
185 | static inline void hpte_do_hugepage_flush(struct mm_struct *mm, | |
186 | unsigned long addr, pmd_t *pmdp, | |
187 | unsigned long old_pmd) | |
188 | { | |
189 | WARN(1, "%s called with THP disabled\n", __func__); | |
190 | } | |
191 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | |
192 | ||
31a14fae AK |
193 | |
194 | extern int hash__map_kernel_page(unsigned long ea, unsigned long pa, | |
195 | unsigned long flags); | |
196 | extern int __meminit hash__vmemmap_create_mapping(unsigned long start, | |
197 | unsigned long page_size, | |
198 | unsigned long phys); | |
199 | extern void hash__vmemmap_remove_mapping(unsigned long start, | |
200 | unsigned long page_size); | |
32b53c01 RA |
201 | |
202 | int hash__create_section_mapping(unsigned long start, unsigned long end); | |
203 | int hash__remove_section_mapping(unsigned long start, unsigned long end); | |
204 | ||
371352ca | 205 | #endif /* !__ASSEMBLY__ */ |
c605782b | 206 | #endif /* __KERNEL__ */ |
26b6a3d9 | 207 | #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ |