git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/s390/mm/pgalloc.c
s390/mm: fix mis-accounting of pgtable_bytes
// SPDX-License-Identifier: GPL-2.0
/*
 * Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
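
/*
 * The "vm.allocate_pgste" sysctl registered above selects whether user
 * page tables are allocated as full 4K tables with page status table
 * entries (PGSTEs) rather than the 2K fragments used by default (see
 * mm_alloc_pgste() and page_table_alloc() below). The PGSTE half of such
 * a table holds per-page guest state, e.g. storage keys, for KVM.
 */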

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}
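
/*
 * A CRST (region or segment) table has 2048 eight-byte entries, so the
 * order-2 allocation above provides the required 16K; crst_table_free()
 * below releases it with the matching free_pages(..., 2). Returning
 * page_to_phys() as a pointer works because the kernel runs on a 1:1
 * mapping of physical memory.
 */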

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm)
		set_user_asce(mm);
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
			mm_inc_nr_puds(mm);
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}
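
/*
 * crst_table_upgrade() grows the user address space one level per loop
 * iteration: a new top-level region table is initialized, the old top
 * level is hooked in as its first populated entry, and mm->pgd plus the
 * ASCE (address space control element) are switched over under
 * page_table_lock. mm_inc_nr_puds() keeps the mm's pgtable_bytes
 * accounting in step with the extra table created by the 3-to-4 level
 * upgrade. If at least one upgrade happened, __crst_table_upgrade() is
 * run on every CPU so that CPUs currently executing with this mm reload
 * the user ASCE and all CPUs flush their local TLB.
 */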

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
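
/*
 * atomic_xor_bits() flips bits in an atomic_t with a cmpxchg retry loop
 * and returns the new value. The callers below use it to toggle one of
 * the fragment bits in page->_mapcount and to test the resulting state
 * in the same atomic step, roughly:
 *
 *	mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
 *	if (mask == 0)
 *		...	the page carries no 2K fragment any more
 */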

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
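
/*
 * Layout used by page_table_alloc() and the free routines below: a pte
 * table needs only PTRS_PER_PTE (256) eight-byte entries, i.e. 2K, so two
 * tables share one 4K page unless PGSTEs are required. page->_mapcount
 * tracks the two halves:
 *
 *	0x01 / 0x02	lower / upper 2K handed out as a page table
 *	0x10 / 0x20	lower / upper 2K waiting for an RCU grace period
 *			(see page_table_free_rcu() below)
 *	       3	whole 4K page in use as a table with PGSTEs
 *
 * Pages with a free 2K half stay on mm->context.pgtable_list so the next
 * allocation can reuse them instead of taking a fresh page.
 */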

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
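
/*
 * page_table_free_rcu() cannot release the 2K fragment right away because
 * a concurrent lockless page table walk may still traverse it. The xor
 * with 0x11U << bit clears the "in use" bit and sets the "pending free"
 * bit in one step, and the fragment index (or 3 for a PGSTE table) is
 * encoded in the two low bits of the pointer passed to tlb_remove_table(),
 * so __tlb_remove_table() can finish the free after the grace period.
 */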

static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}
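
/*
 * __tlb_remove_table() decodes the tag from the two low pointer bits:
 * 0 means an order-2 CRST table, 1 or 2 a 2K fragment whose "pending
 * free" bit is cleared here, and 3 a full 4K page table with PGSTEs.
 * For the fragment cases the backing page is only handed back to the
 * allocator once no bit in page->_mapcount is left set.
 */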

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
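
/*
 * tlb_remove_table() collects page table pointers in a page-sized
 * mmu_table_batch; tlb_table_flush() passes the whole batch to
 * call_rcu_sched(), so tlb_remove_table_rcu() frees the tables only after
 * a grace period. If no batch page can be allocated, tlb_remove_table_one()
 * falls back to a synchronous IPI to all other CPUs, which is enough for
 * walkers that run with interrupts disabled, and then frees the table
 * immediately.
 */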