/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

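/*
 * For reference, the maps manipulated below live in the per-mm
 * struct subpage_prot_table (at the time of writing defined in
 * asm/mmu-hash64.h), roughly:
 *
 *	struct subpage_prot_table {
 *		unsigned long maxaddr;		// only addresses < this are protected
 *		unsigned int **protptrs[2];	// above 4GB: pages of pointers to map pages
 *		unsigned int *low_prot[4];	// map pages for the first 4GB
 *	};
 *
 * low_prot[] points directly at map pages for the first 4GB of the
 * address space; above that, protptrs[] points at pages of pointers
 * to map pages.
 */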

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	/* The low_prot pages cover the first 4GB of the address space. */
	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	/* Above 4GB, free the map pages, then the pages of pointers. */
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}

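/*
 * Flush any hashed page table entries (HPTEs) covering npages pages
 * starting at addr, so that subsequent accesses refault and pick up
 * the new subpage protections.  On the hash MMU, pte_update() with
 * nothing to set or clear still flushes a stale HPTE for us: it
 * calls hpte_need_flush() whenever _PAGE_HASHPTE is set.
 */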
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 * This is what sys_subpage_prot() does when it is passed a NULL map.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			/* addresses below 4GB use spt->low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	split_huge_page_pmd(vma, addr, pmd);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the VMAs in that
	 * range VM_NOHUGEPAGE and split any huge pages they contain.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range lies entirely below the first mapped VMA,
	 * just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		subpage_proto_walk.private = vma;
		walk_page_range(vma->vm_start, vma->vm_end,
				&subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
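/*
 * Worked example of the encoding (illustrative): each u32 in the map
 * covers one 64k page of the range, and the hash fault path (see
 * subpage_protection() in hash_utils_64.c) takes the 2-bit field for
 * 4k subpage i from the most-significant end, i.e. bits 31-2*i and
 * 30-2*i.  So a map word of 0x55555555 (01 in every field) makes all
 * sixteen subpages of a page read-only, while 0x00000003 denies all
 * access to the last subpage and leaves the others unrestricted.
 */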
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			/* addresses below 4GB use spt->low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		/* Demote this segment to 4k hash pages if it isn't already */
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		/* Drop mmap_sem while copying from userspace, which may fault */
		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
out:
	up_write(&mm->mmap_sem);
out2:
	return err;
}
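
/*
 * Illustrative userspace sketch (not part of this file): on a 64k-page
 * kernel that wires this up as __NR_subpage_prot, a process could
 * write-protect the sixteen 4k subpages of one 64k page with
 *
 *	u32 map[1] = { 0x55555555 };	// field value 1 = no write
 *	syscall(__NR_subpage_prot, (unsigned long)p, 0x10000, map);
 *
 * where p is the 64k-aligned start of the page, and clear the
 * restrictions again by passing a NULL map:
 *
 *	syscall(__NR_subpage_prot, (unsigned long)p, 0x10000, NULL);
 */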