// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

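/*
 * The "vm.allocate_pgste" sysctl makes new page tables carry page
 * status table extensions (PGSTEs), which KVM needs to back guests.
 * With PGSTEs a page table occupies a full 4K page instead of 2K.
 */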
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

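/*
 * Region and segment tables (crst tables) are four pages (16K) with
 * 2048 eight-byte entries, hence the order-2 allocation.
 */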
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

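/*
 * Run on every CPU after an upgrade: CPUs currently executing with
 * the upgraded mm reload their user ASCE, then flush the local TLB.
 */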
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

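/*
 * Grow the user address space by stacking additional region tables on
 * top of the existing tree: four levels extend the limit from 4TB to
 * 8PB, five levels to the full 64-bit range.
 */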
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	if (end >= TASK_SIZE_MAX)
		return -ENOMEM;
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}

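/*
 * Reduce a 31-bit compat process to a 2GB address space by dropping
 * the region-third table and running on the segment table alone.
 */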
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

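/* atomically XOR bits into *v with a cmpxchg loop, return the new value */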
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

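/*
 * Allocate a full 4K page table with PGSTEs for KVM: the lower 2K hold
 * 256 invalid PTEs, the upper 2K the zeroed page status table entries.
 */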
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
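/*
 * A 4K page holds two 2K page tables.  The low two bits of
 * page->_mapcount track which halves are in use, the next two bits
 * mark halves pending free in page_table_free_rcu.  Pages with a free
 * half are kept on mm->context.pgtable_list; 4K tables with PGSTEs
 * always use the whole page and never appear on that list.
 */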
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}

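/*
 * Return a 2K fragment to its pool, or free the page outright once
 * both halves are unused (or for a 4K table with PGSTEs).
 */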
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

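/*
 * Free a page table after an RCU grace period.  The fragment is marked
 * pending in the upper nibble of page->_mapcount and the table pointer
 * is tagged in its low bits so __tlb_remove_table knows what to free.
 */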
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

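/*
 * Final free of a table from the RCU callback or the IPI fallback.
 * The low two bits of the tagged pointer say what is being freed:
 * 0 = 16K crst table, 1/2 = one 2K half, 3 = 4K table with PGSTEs.
 */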
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

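/*
 * Hand the current batch to RCU; the tables are freed once all CPUs
 * have passed through a quiescent state.
 */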
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

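/*
 * Queue a table for deferred freeing.  If no batch page can be
 * allocated, fall back to a synchronous flush plus IPI broadcast
 * before freeing the table immediately.
 */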
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}