arch/sparc/mm/hugetlbpage.c
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
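	/*
	 * A worked example of the alignment request, assuming the default
	 * 8MB hstate: huge_page_mask(h) is ~(HPAGE_SIZE - 1), so align_mask
	 * keeps exactly the address bits between PAGE_SIZE and HPAGE_SIZE,
	 * and vm_unmapped_area() hands back an 8MB-aligned address.
	 */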
	addr = vm_unmapped_area(&info);

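	/*
	 * sparc64 has an unmappable hole in the middle of the virtual
	 * address space, bounded by VA_EXCLUDE_START and VA_EXCLUDE_END.
	 * If the search below the hole failed and this is a 64-bit task,
	 * retry in the region above the hole.
	 */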
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
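	/*
	 * No usable hint: defer to the layout policy this mm already uses
	 * for ordinary mappings. mm->get_unmapped_area pointing at the
	 * legacy arch_get_unmapped_area means bottom-up allocation;
	 * anything else means the top-down layout.
	 */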
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
							  pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
							 pgoff, flags);
}

static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

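/*
 * Encode the hugepage size into the sun4v TTE. With the usual sparc64
 * constants (HPAGE_64K_SHIFT == 16, HPAGE_SHIFT == 23 for the 8MB
 * hugepage, HPAGE_256MB_SHIFT == 28): a 64K page gets _PAGE_SZ64K_4V;
 * an 8MB page keeps the default _PAGE_SZ4MB_4V size bits, because the
 * hardware sees it as two 4MB real pages; 8MB and 256MB pages also set
 * _PAGE_PMD_HUGE, since they are mapped at the PMD level.
 */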
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writeable)
{
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	return hugepage_shift_to_tte(entry, shift);
}

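/*
 * The inverse of the encoding above: recover the page shift from the
 * TTE size bits. Note that _PAGE_SZ4MB reports REAL_HPAGE_SHIFT (22),
 * i.e. one 4MB real page, not the 8MB HPAGE_SHIFT; huge_tte_to_size()
 * below promotes that back to HPAGE_SIZE.
 */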
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned int shift;

	if (tlb_type == hypervisor)
		shift = sun4v_huge_tte_to_shift(entry);
	else
		shift = sun4u_huge_tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

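/*
 * Convert a huge TTE to the size the VM tracks. For example, a 4MB
 * real-page TTE yields 1UL << 22, which is promoted to the 8MB
 * HPAGE_SIZE, since the kernel manages the default hugepage as one
 * 8MB unit backed by two 4MB TTEs.
 */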
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

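/*
 * Hugepages of PMD_SIZE (8MB) and larger are mapped by PMD entries, so
 * the PMD itself doubles as the "pte"; only 64K hugepages need a real
 * PTE page below the PMD.
 */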
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return NULL;

		if (sz >= PMD_SIZE)
			pte = (pte_t *)pmd;
		else
			pte = pte_alloc_map(mm, pmd, addr);
	}

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				if (is_hugetlb_pmd(*pmd))
					pte = (pte_t *)pmd;
				else
					pte = pte_offset_map(pmd, addr);
			}
		}
	}

	return pte;
}

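/*
 * Fan a huge mapping out over the page-table entries that back it.
 * With PAGE_SHIFT == 13 and PMD_SHIFT == 23: a 64K page writes
 * nptes = 8 consecutive PTEs, a 256MB page writes 32 consecutive PMDs,
 * and an 8MB page writes a single PMD but still needs two TLB batch
 * entries, one per underlying 4MB real page.
 */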
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t orig;

	size = huge_tte_to_size(entry);
	shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, hugepage_shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);
	if (size >= HPAGE_SIZE)
		nptes = size >> PMD_SHIFT;
	else
		nptes = size >> PAGE_SHIFT;

	hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
		huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    hugepage_shift);

	return entry;
}

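/*
 * A hugetlb pmd is either a present huge mapping (_PAGE_PMD_HUGE set)
 * or a non-present huge entry, such as one under migration, which lacks
 * _PAGE_VALID; a regular pmd pointing at a pte page is valid without
 * _PAGE_PMD_HUGE and so is rejected.
 */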
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return 0;
}

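/*
 * The helpers below mirror free_pgd_range() and friends in mm/memory.c,
 * with one twist: a pmd that mapped a hugepage directly has no pte page
 * under it, so it is simply cleared rather than having a page table
 * freed.
 */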
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

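	/*
	 * Free the pmd table itself only if the whole pud-aligned span
	 * lies inside [floor, ceiling); otherwise an adjacent mapping
	 * may still be using entries in it.
	 */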
	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
				       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}