// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

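	/*
	 * The 64-bit VA space has an unusable hole between
	 * VA_EXCLUDE_START and VA_EXCLUDE_END; if the search below
	 * the hole failed, retry in the region above it.
	 */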
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

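/* Top-down variant, searching below mm->mmap_base; only 32-bit
 * tasks take this path on sparc64.
 */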
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

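/* Main entry point: validate len and addr, honour MAP_FIXED and any
 * address hint, then fall back to the bottom-up or top-down search
 * depending on this mm's layout.
 */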
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

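/* The size bits for a sun4u TTE are already set by the time the pte
 * reaches this point, so there is nothing to adjust here.
 */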
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

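/* Encode the size implied by @shift into the sun4v TTE, and mark
 * PMD/PUD-level leaves with the matching _PAGE_*_HUGE bit.
 */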
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) &= ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) |= hugepage_size;
	return entry;
}

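/* Dispatch on the TTE format: sun4v (hypervisor) vs. sun4u. */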
static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

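/* Called by the generic hugetlb code to turn a freshly built pte
 * into a huge TTE of the size dictated by this vma's hstate.
 */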
pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writeable)
{
	unsigned int shift = huge_page_shift(hstate_vma(vma));
	pte_t pte;

	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd */
	if (vma->vm_flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

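/* Inverse of the shift_to_tte helpers: recover the page shift from
 * the TTE size bits.  PAGE_SHIFT is returned for anything that is
 * not a valid hugepage TTE.
 */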
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

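/* An HPAGE_SIZE huge page is backed by two REAL_HPAGE_SIZE TTEs, so
 * map the TTE-level size back up to the hstate-level one.
 */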
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

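/* Walk the page table, allocating intermediate levels as needed, and
 * stop at the level matching the huge page size: the PUD for sz >=
 * PUD_SIZE, the PMD for sz >= PMD_SIZE, otherwise a normal pte.
 */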
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_map(mm, pmd, addr);
}

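/* Lookup-only counterpart of huge_pte_alloc(): returns NULL if no
 * entry exists at any level for @addr.
 */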
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_map(pmd, addr);
}

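/* Install @entry, replicated across every hardware pte that backs
 * the huge page, and queue the TLB flushes for the old mapping.
 */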
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

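/* Clear every hardware pte backing the huge page and return the old
 * leading entry.
 */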
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

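/* The two-bit test (rather than checking _PAGE_VALID alone) also
 * catches huge entries whose valid bit has been cleared, e.g. for
 * PROT_NONE.
 */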
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}

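/* The freeing helpers below mirror free_pgd_range() and friends in
 * mm/memory.c, except that a huge leaf may sit directly at the PMD
 * or PUD level and is then simply cleared.
 */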
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}