// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PGSTE

int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
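
/*
 * CRST (region and segment) tables have 2048 entries of 8 bytes each,
 * i.e. 16K, hence the order-2 page allocations below.
 */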
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}
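
/*
 * Executed on each CPU via on_each_cpu() after an upgrade: reload the
 * new top-level ASCE into the control registers so that no CPU keeps
 * translating through the old top-level table.
 */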
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* we must change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		if (current->thread.mm_segment == USER_DS) {
			__ctl_load(S390_lowcore.user_asce, 1, 1);
			/* Mark user-ASCE present in CR1 */
			clear_cpu_flag(CIF_ASCE_PRIMARY);
		}
		if (current->thread.mm_segment == USER_DS_SACF) {
			__ctl_load(S390_lowcore.user_asce, 7, 7);
			/* enable_sacf_uaccess does all or nothing */
			WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
		}
	}
	__tlb_flush_local();
}
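
/*
 * Grow the page table hierarchy of @mm until it covers addresses up to
 * @end: allocate a new top-level table and hook the old top level in
 * below it, going from 3 to 4 and/or from 4 to 5 translation levels.
 */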
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock lock held and there is
	 * no reason to optimize for the case of otherwise. However, if
	 * that would ever change, the below check will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}
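
/*
 * The upper byte (bits 24-31) of page->_refcount tracks the state of
 * the two 2K page table fragments in a 4K page: bits 24/25 flag a
 * fragment as allocated, bits 28/29 flag it as pending removal in the
 * RCU path. atomic_xor_bits() toggles the requested bits and returns
 * the new value so callers can inspect the resulting state.
 */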
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
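
/*
 * A 4K page table with PGSTEs (page status table entries, needed to
 * run KVM guests) holds 256 ptes in its first half and 256 PGSTEs in
 * its second half: the ptes are initialized to _PAGE_INVALID, the
 * PGSTEs to zero.
 */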
#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */
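
/*
 * Without PGSTEs a page table needs only 2K, so two page tables are
 * carved out of each 4K page. Pages with a free 2K fragment are kept
 * on mm->context.pgtable_list; the fragment state is encoded in the
 * upper byte of page->_refcount (see atomic_xor_bits() above).
 */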
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
							1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_pte_page_dtor(page);
	__free_page(page);
}
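
/*
 * RCU variant of page_table_free(): the fragment is only marked as
 * pending here, and the table pointer handed to tlb_remove_table()
 * carries the fragment number in its low bits so __tlb_remove_table()
 * can complete the free once the TLB has been flushed.
 */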
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
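
/*
 * The low two bits of the pointer passed to tlb_remove_table() encode
 * what has to be freed: 0 = order-2 CRST table (pmd/pud/p4d), 1/2 =
 * first or second 2K page table fragment, 3 = 4K page table with
 * PGSTEs.
 */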
void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		fallthrough;
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_pte_page_dtor(page);
		__free_page(page);
		break;
	}
}
/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;
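
/*
 * Base page tables are _PAGE_TABLE_SIZE (2K) each and are carved out
 * of a dedicated slab cache; base CRST tables reuse the same order-2
 * page allocation scheme as the regular CRST tables above.
 */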
static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}
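
/*
 * The walkers below share one pattern: with alloc=1 missing lower
 * level tables are allocated, linked, and filled in; with alloc=0 the
 * linked lower level tables are walked and freed again.
 */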
static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}
static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}
static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}
static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}
/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}
/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
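
/*
 * A minimal usage sketch for the base asce interface; note that
 * issue_service_call() is a hypothetical caller, not a real kernel
 * function:
 *
 *	unsigned long asce = base_asce_alloc((unsigned long) buf, num_pages);
 *
 *	if (!asce)
 *		return -ENOMEM;
 *	rc = issue_service_call(asce);
 *	base_asce_free(asce);
 */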