[S390] Add four level page tables for CONFIG_64BIT=y.
/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache() do {} while (0)

unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);

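/*
 * Clear a table of n bytes by setting every entry to val: the first
 * entry is stored directly, then mvc propagates it through the first
 * 256 byte block and on through the rest of the table 256 bytes at a
 * time.
 */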
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n));
}

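/*
 * Pre-fill a newly allocated crst (combined region and segment) table
 * with 2048 copies of the given empty entry; if a shadow table is
 * attached, initialize it the same way.
 */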
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}

#ifndef __s390x__

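/*
 * pgd_entry_type yields the empty entry value used to initialize a
 * freshly allocated pgd. With the two level layout of the 31 bit
 * kernel the pgd is a segment table.
 */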
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

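/*
 * The pud and pmd levels are folded on 31 bit, so these stubs must
 * never be reached.
 */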
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x) do { } while (0)

#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)

#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()

#define pud_populate(mm, pud, pmd) BUG()
#define pud_populate_kernel(mm, pud, pmd) BUG()

#else /* __s390x__ */

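/*
 * On 64 bit the pgd is a region-second table, which gives the four
 * level page table layout.
 */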
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _REGION2_ENTRY_EMPTY;
}

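/* Allocate a region-third table for the pud level and mark it empty. */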
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

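/* Allocate a segment table for the pmd level and mark it empty. */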
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)

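/*
 * Link a pud into a pgd entry. pgd_populate also mirrors the update
 * into the shadow tables used for the noexec emulation, when both
 * shadow tables exist.
 */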
static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_t *shadow_pgd = get_shadow_table(pgd);
	pud_t *shadow_pud = get_shadow_table(pud);

	if (shadow_pgd && shadow_pud)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pud);
	pgd_populate_kernel(mm, pgd, pud);
}

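/*
 * Link a pmd into a pud entry; for an mm running with noexec the
 * shadow pud and pmd are linked as well.
 */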
static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_populate_kernel(mm, pud, pmd);
	if (mm->context.noexec) {
		pud = get_shadow_table(pud);
		pmd = get_shadow_table(pmd);
		pud_populate_kernel(mm, pud, pmd);
	}
}

#endif /* __s390x__ */

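/*
 * Allocate the top level table for a new mm: initialize the per-mm
 * crst and page table lists, then allocate and clear the pgd.
 */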
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *crst;

	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	crst = crst_table_alloc(mm, s390_noexec);
	if (crst)
		crst_table_init(crst, pgd_entry_type(mm));
	return (pgd_t *) crst;
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)

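/*
 * Link a page table into a pmd entry. For an mm with noexec the page
 * table starting at pte + PTRS_PER_PTE, i.e. the second half of the
 * page table page, is linked into the shadow pmd.
 */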
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_populate_kernel(mm, pmd, pte);
	if (mm->context.noexec) {
		pmd = get_shadow_table(pmd);
		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
	}
}

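/*
 * Extract the page table origin from a pmd entry by masking off the
 * low order bits.
 */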
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

#endif /* _S390_PGALLOC_H */