arch/powerpc/mm/subpage-prot.c

/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}
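
/*
 * For reference: the walk above assumes the three-level layout of
 * struct subpage_prot_table hanging off mm->context.spt.  A sketch of
 * the layout (field names match their use in this file; the exact
 * definition lives in the powerpc hash-MMU headers and may differ by
 * kernel version):
 *
 *	struct subpage_prot_table {
 *		unsigned long maxaddr;   (no subpage protections set above this)
 *		u32 **protptrs[TASK_SIZE_USER64 >> 43];
 *		u32 *low_prot[4];
 *	};
 *
 * With 64k pages, one u32 of protection bits covers one 64k page, so a
 * leaf page holds 64K/4 = 16384 entries (SBP_L1_COUNT) covering 1GB of
 * address space; low_prot[0..3] therefore cover the first 4GB directly,
 * while each protptrs[] directory page holds 8192 leaf pointers
 * (SBP_L2_COUNT) covering 8TB, which is where the SBP_L3_SHIFT of 43
 * used for the top-level index comes from.
 */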

void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}

static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard; we just mark all the VMAs in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range falls entirely within unmapped space, just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
	return err;
}
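
/*
 * For illustration only (not kernel code): from userspace this is reached
 * via the subpage_prot syscall, passing one u32 of protection bits per
 * 64k page in [addr, addr + len).  A minimal sketch, assuming a 64k-page
 * ppc64 system that defines __NR_subpage_prot; addresses and values are
 * example placeholders:
 *
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// 0x55555555 sets every 2-bit subpage field to 1, making all
 *	// sixteen 4k subpages of the first 64k page read-only; the
 *	// second page keeps full access (0).
 *	uint32_t map[2] = { 0x55555555, 0 };
 *
 *	// addr must be 64k-aligned and len a multiple of 64k.
 *	long rc = syscall(__NR_subpage_prot, addr, 2 * 0x10000UL, map);
 *
 *	// Passing a NULL map clears any subpage protections for the
 *	// range (see subpage_prot_clear() above).
 *	syscall(__NR_subpage_prot, addr, 2 * 0x10000UL, NULL);
 */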